/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi              ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3
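
/*
 * These values live in the low bits of dc->pc / dc->npc.  Real
 * instruction addresses are always 4-byte aligned, so a non-zero
 * "npc & 3" identifies one of the symbolic values above rather
 * than a known address.
 */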

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
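
/*
 * Worked example: GET_FIELD(insn, 0, 1) extracts bits 31:30 of insn
 * (in this order, bit 0 names the most significant bit), whereas the
 * manual-order GET_FIELD_SP(insn, 0, 1) extracts bits 1:0.
 */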

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}
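
/*
 * Single-precision registers are packed in pairs: even-numbered
 * registers map to l.upper and odd-numbered ones to l.lower of the
 * same CPU_DoubleU slot, e.g. %f0 -> fpr[0].l.upper and
 * %f1 -> fpr[0].l.lower.
 */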

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}
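
/*
 * Example: with 32-bit address masking active (PSTATE.AM on SPARC64),
 * address_mask_i(dc, 0xffffffff80001000) yields 0x80001000; on pre-v9
 * targets AM_CHECK is constant false and both helpers are no-ops.
 */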

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
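
/*
 * gen_op_addcc_int below records the 32-bit carry as the carry-out
 * into bit 32 of cpu_icc_C, so on a 64-bit target extract just that
 * bit; on a 32-bit target cpu_icc_C already holds the carry directly.
 */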

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
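
/*
 * A note on the V computation above: V = (N ^ src2) & ~(src1 ^ src2).
 * Signed overflow requires the operands to have equal signs (so
 * src1 ^ src2 has the sign bit clear) while the result's sign differs
 * from them (N ^ src2 has the sign bit set); only the sign bit of
 * cpu_cc_V is meaningful.
 */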

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
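
/*
 * If either operand has a tag bit set, "t & 3" is non-zero; negating
 * it sets bit 31, and the zero-extension confines the effect to the
 * 32-bit half, so only icc.V (not xcc.V) reports the tag overflow.
 */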

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
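
/*
 * MULScc is one step of the v8 shift-and-add multiply: with the
 * multiplier in %y, each step conditionally adds the multiplicand,
 * shifts a product bit into %y, and a 32-step sequence accumulates
 * the full product across the destination register and %y.
 */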

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
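
/*
 * mulu2 forms the full 128-bit product in h:l; add2 then adds src3
 * with carry propagation and keeps only the high half in dst, i.e.
 * dst = (src1 * src2 + src3) >> 64.
 */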

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
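
/*
 * Branchless saturation: on overflow the raw result r has the wrong
 * sign, so t = (r >= 0) + INT32_MAX evaluates to INT32_MIN for a
 * wrapped-positive r and INT32_MAX for a wrapped-negative r; it is
 * selected whenever v is negative.  gen_op_fpsubs32s below uses the
 * same trick with the subtraction form of the overflow test.
 */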

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
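
/*
 * This computes the rounded average (a + b + 1) >> 1 per lane without
 * widening, using the identity
 * (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1);
 * the arithmetic shifts keep the identity valid for signed lanes.
 */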

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
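
/*
 * The label returned above is only a placeholder: the out-of-line
 * stub that reloads the pc/npc recorded in the DisasDelayException
 * and raises the exception is emitted once the main TB body is
 * complete, one stub per entry on delay_excp_list.
 */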

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled, to bypass the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1728 
1729 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1730 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1731                               TCGv_i32 asi, TCGv_i32 mop)
1732 {
1733     g_assert_not_reached();
1734 }
1735 
1736 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1737                               TCGv_i32 asi, TCGv_i32 mop)
1738 {
1739     g_assert_not_reached();
1740 }
1741 #endif
1742 
1743 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1744 {
1745     switch (da->type) {
1746     case GET_ASI_EXCP:
1747         break;
1748     case GET_ASI_DTWINX: /* Reserved for ldda.  */
1749         gen_exception(dc, TT_ILL_INSN);
1750         break;
1751     case GET_ASI_DIRECT:
1752         tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1753         break;
1754 
1755     case GET_ASI_CODE:
1756 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1757         {
1758             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1759             TCGv_i64 t64 = tcg_temp_new_i64();
1760 
1761             gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1762             tcg_gen_trunc_i64_tl(dst, t64);
1763         }
1764         break;
1765 #else
1766         g_assert_not_reached();
1767 #endif
1768 
1769     default:
1770         {
1771             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1772             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1773 
1774             save_state(dc);
1775 #ifdef TARGET_SPARC64
1776             gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1777 #else
1778             {
1779                 TCGv_i64 t64 = tcg_temp_new_i64();
1780                 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1781                 tcg_gen_trunc_i64_tl(dst, t64);
1782             }
1783 #endif
1784         }
1785         break;
1786     }
1787 }
1788 
1789 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1790 {
1791     switch (da->type) {
1792     case GET_ASI_EXCP:
1793         break;
1794 
1795     case GET_ASI_DTWINX: /* Reserved for stda.  */
1796         if (TARGET_LONG_BITS == 32) {
1797             gen_exception(dc, TT_ILL_INSN);
1798             break;
1799         } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1800                 /* Pre-OpenSPARC CPUs don't have these */
1801             gen_exception(dc, TT_ILL_INSN);
1802             break;
1803         }
1804         /* On OpenSPARC T1+ CPUs, TWINX ASIs used in stores are ST_BLKINIT_ ASIs */
1805         /* fall through */
1806 
1807     case GET_ASI_DIRECT:
1808         tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1809         break;
1810 
1811     case GET_ASI_BCOPY:
1812         assert(TARGET_LONG_BITS == 32);
1813         /*
1814          * Copy 32 bytes from the address in SRC to ADDR.
1815          *
1816          * From Ross RT625 hyperSPARC manual, section 4.6:
1817          * "Block Copy and Block Fill will work only on cache line boundaries."
1818          *
1819          * It does not specify if an unaligned address is truncated or trapped.
1820          * Previous qemu behaviour was to truncate to 4-byte alignment, which
1821          * is obviously wrong.  The only place I can see this used is in the
1822          * Linux kernel which begins with page alignment, advancing by 32,
1823          * so is always aligned.  Assume truncation as the simpler option.
1824          *
1825          * Since the loads and stores are paired, allow the copy to happen
1826          * in the host endianness.  The copy need not be atomic.
1827          */
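             /*
              * Below, the aligned line is moved as two 16-byte pairs;
              * MO_ATOM_IFALIGN_PAIR lets each 16-byte access be treated as
              * a pair of 8-byte accesses, each atomic if aligned.
              */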
1828         {
1829             MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1830             TCGv saddr = tcg_temp_new();
1831             TCGv daddr = tcg_temp_new();
1832             TCGv_i128 tmp = tcg_temp_new_i128();
1833 
1834             tcg_gen_andi_tl(saddr, src, -32);
1835             tcg_gen_andi_tl(daddr, addr, -32);
1836             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1837             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1838             tcg_gen_addi_tl(saddr, saddr, 16);
1839             tcg_gen_addi_tl(daddr, daddr, 16);
1840             tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1841             tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1842         }
1843         break;
1844 
1845     default:
1846         {
1847             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1848             TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1849 
1850             save_state(dc);
1851 #ifdef TARGET_SPARC64
1852             gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1853 #else
1854             {
1855                 TCGv_i64 t64 = tcg_temp_new_i64();
1856                 tcg_gen_extu_tl_i64(t64, src);
1857                 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1858             }
1859 #endif
1860 
1861             /* A write to a TLB register may alter page maps.  End the TB. */
1862             dc->npc = DYNAMIC_PC;
1863         }
1864         break;
1865     }
1866 }
1867 
1868 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1869                          TCGv dst, TCGv src, TCGv addr)
1870 {
1871     switch (da->type) {
1872     case GET_ASI_EXCP:
1873         break;
1874     case GET_ASI_DIRECT:
1875         tcg_gen_atomic_xchg_tl(dst, addr, src,
1876                                da->mem_idx, da->memop | MO_ALIGN);
1877         break;
1878     default:
1879         /* ??? Should be DAE_invalid_asi.  */
1880         gen_exception(dc, TT_DATA_ACCESS);
1881         break;
1882     }
1883 }
1884 
1885 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1886                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1887 {
1888     switch (da->type) {
1889     case GET_ASI_EXCP:
1890         return;
1891     case GET_ASI_DIRECT:
1892         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1893                                   da->mem_idx, da->memop | MO_ALIGN);
1894         break;
1895     default:
1896         /* ??? Should be DAE_invalid_asi.  */
1897         gen_exception(dc, TT_DATA_ACCESS);
1898         break;
1899     }
1900 }
1901 
1902 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1903 {
1904     switch (da->type) {
1905     case GET_ASI_EXCP:
1906         break;
1907     case GET_ASI_DIRECT:
1908         tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1909                                da->mem_idx, MO_UB);
1910         break;
1911     default:
1912         /* ??? In theory, this should raise DAE_invalid_asi.
1913            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
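             /* Under CF_PARALLEL the non-atomic helper load/store pair below
                would not be safe, so force the insn to be replayed with
                exclusive execution instead. */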
1914         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1915             gen_helper_exit_atomic(tcg_env);
1916         } else {
1917             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1918             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1919             TCGv_i64 s64, t64;
1920 
1921             save_state(dc);
1922             t64 = tcg_temp_new_i64();
1923             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1924 
1925             s64 = tcg_constant_i64(0xff);
1926             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1927 
1928             tcg_gen_trunc_i64_tl(dst, t64);
1929 
1930             /* End the TB.  */
1931             dc->npc = DYNAMIC_PC;
1932         }
1933         break;
1934     }
1935 }
1936 
1937 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1938                         TCGv addr, int rd)
1939 {
1940     MemOp memop = da->memop;
1941     MemOp size = memop & MO_SIZE;
1942     TCGv_i32 d32;
1943     TCGv_i64 d64, l64;
1944     TCGv addr_tmp;
1945 
1946     /* TODO: Use 128-bit load/store below. */
1947     if (size == MO_128) {
1948         memop = (memop & ~MO_SIZE) | MO_64;
1949     }
1950 
1951     switch (da->type) {
1952     case GET_ASI_EXCP:
1953         break;
1954 
1955     case GET_ASI_DIRECT:
1956         memop |= MO_ALIGN_4;
1957         switch (size) {
1958         case MO_32:
1959             d32 = tcg_temp_new_i32();
1960             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1961             gen_store_fpr_F(dc, rd, d32);
1962             break;
1963 
1964         case MO_64:
1965             d64 = tcg_temp_new_i64();
1966             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1967             gen_store_fpr_D(dc, rd, d64);
1968             break;
1969 
1970         case MO_128:
1971             d64 = tcg_temp_new_i64();
1972             l64 = tcg_temp_new_i64();
1973             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1974             addr_tmp = tcg_temp_new();
1975             tcg_gen_addi_tl(addr_tmp, addr, 8);
1976             tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
1977             gen_store_fpr_D(dc, rd, d64);
1978             gen_store_fpr_D(dc, rd + 2, l64);
1979             break;
1980         default:
1981             g_assert_not_reached();
1982         }
1983         break;
1984 
1985     case GET_ASI_BLOCK:
1986         /* Valid for lddfa on aligned registers only.  */
1987         if (orig_size == MO_64 && (rd & 7) == 0) {
1988             /* The first operation checks required alignment.  */
1989             addr_tmp = tcg_temp_new();
1990             d64 = tcg_temp_new_i64();
1991             for (int i = 0; ; ++i) {
1992                 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
1993                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1994                 gen_store_fpr_D(dc, rd + 2 * i, d64);
1995                 if (i == 7) {
1996                     break;
1997                 }
1998                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1999                 addr = addr_tmp;
2000             }
2001         } else {
2002             gen_exception(dc, TT_ILL_INSN);
2003         }
2004         break;
2005 
2006     case GET_ASI_SHORT:
2007         /* Valid for lddfa only.  */
2008         if (orig_size == MO_64) {
2009             d64 = tcg_temp_new_i64();
2010             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2011             gen_store_fpr_D(dc, rd, d64);
2012         } else {
2013             gen_exception(dc, TT_ILL_INSN);
2014         }
2015         break;
2016 
2017     default:
2018         {
2019             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2020             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2021 
2022             save_state(dc);
2023             /* According to the table in the UA2011 manual, the only
2024                other asis that are valid for ldfa/lddfa/ldqfa are
2025                the NO_FAULT asis.  We still need a helper for these,
2026                but we can just use the integer asi helper for them.  */
2027             switch (size) {
2028             case MO_32:
2029                 d64 = tcg_temp_new_i64();
2030                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2031                 d32 = tcg_temp_new_i32();
2032                 tcg_gen_extrl_i64_i32(d32, d64);
2033                 gen_store_fpr_F(dc, rd, d32);
2034                 break;
2035             case MO_64:
2036                 d64 = tcg_temp_new_i64();
2037                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2038                 gen_store_fpr_D(dc, rd, d64);
2039                 break;
2040             case MO_128:
2041                 d64 = tcg_temp_new_i64();
2042                 l64 = tcg_temp_new_i64();
2043                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2044                 addr_tmp = tcg_temp_new();
2045                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2046                 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2047                 gen_store_fpr_D(dc, rd, d64);
2048                 gen_store_fpr_D(dc, rd + 2, l64);
2049                 break;
2050             default:
2051                 g_assert_not_reached();
2052             }
2053         }
2054         break;
2055     }
2056 }
2057 
2058 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2059                         TCGv addr, int rd)
2060 {
2061     MemOp memop = da->memop;
2062     MemOp size = memop & MO_SIZE;
2063     TCGv_i32 d32;
2064     TCGv_i64 d64;
2065     TCGv addr_tmp;
2066 
2067     /* TODO: Use 128-bit load/store below. */
2068     if (size == MO_128) {
2069         memop = (memop & ~MO_SIZE) | MO_64;
2070     }
2071 
2072     switch (da->type) {
2073     case GET_ASI_EXCP:
2074         break;
2075 
2076     case GET_ASI_DIRECT:
2077         memop |= MO_ALIGN_4;
2078         switch (size) {
2079         case MO_32:
2080             d32 = gen_load_fpr_F(dc, rd);
2081             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2082             break;
2083         case MO_64:
2084             d64 = gen_load_fpr_D(dc, rd);
2085             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2086             break;
2087         case MO_128:
2088             /* Only 4-byte alignment required.  However, it is legal for the
2089                cpu to signal the alignment fault, and the OS trap handler is
2090                required to fix it up.  Requiring 16-byte alignment here avoids
2091                having to probe the second page before performing the first
2092                write.  */
2093             d64 = gen_load_fpr_D(dc, rd);
2094             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2095             addr_tmp = tcg_temp_new();
2096             tcg_gen_addi_tl(addr_tmp, addr, 8);
2097             d64 = gen_load_fpr_D(dc, rd + 2);
2098             tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2099             break;
2100         default:
2101             g_assert_not_reached();
2102         }
2103         break;
2104 
2105     case GET_ASI_BLOCK:
2106         /* Valid for stdfa on aligned registers only.  */
2107         if (orig_size == MO_64 && (rd & 7) == 0) {
2108             /* The first operation checks required alignment.  */
2109             addr_tmp = tcg_temp_new();
2110             for (int i = 0; ; ++i) {
2111                 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2112                 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2113                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2114                 if (i == 7) {
2115                     break;
2116                 }
2117                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2118                 addr = addr_tmp;
2119             }
2120         } else {
2121             gen_exception(dc, TT_ILL_INSN);
2122         }
2123         break;
2124 
2125     case GET_ASI_SHORT:
2126         /* Valid for stdfa only.  */
2127         if (orig_size == MO_64) {
2128             d64 = gen_load_fpr_D(dc, rd);
2129             tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2130         } else {
2131             gen_exception(dc, TT_ILL_INSN);
2132         }
2133         break;
2134 
2135     default:
2136         /* According to the table in the UA2011 manual, the only
2137            other asis that are valid for stfa/stdfa/stqfa are
2138            the PST* asis, which aren't currently handled.  */
2139         gen_exception(dc, TT_ILL_INSN);
2140         break;
2141     }
2142 }
2143 
2144 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2145 {
2146     TCGv hi = gen_dest_gpr(dc, rd);
2147     TCGv lo = gen_dest_gpr(dc, rd + 1);
2148 
2149     switch (da->type) {
2150     case GET_ASI_EXCP:
2151         return;
2152 
2153     case GET_ASI_DTWINX:
2154 #ifdef TARGET_SPARC64
2155         {
2156             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2157             TCGv_i128 t = tcg_temp_new_i128();
2158 
2159             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2160             /*
2161              * Note that LE twinx acts as if each 64-bit register result is
2162              * byte swapped.  We perform one 128-bit LE load, so must swap
2163              * the order of the writebacks.
2164              */
2165             if ((mop & MO_BSWAP) == MO_TE) {
2166                 tcg_gen_extr_i128_i64(lo, hi, t);
2167             } else {
2168                 tcg_gen_extr_i128_i64(hi, lo, t);
2169             }
2170         }
2171         break;
2172 #else
2173         g_assert_not_reached();
2174 #endif
2175 
2176     case GET_ASI_DIRECT:
2177         {
2178             TCGv_i64 tmp = tcg_temp_new_i64();
2179 
2180             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2181 
2182             /* Note that LE ldda acts as if each 32-bit register
2183                result is byte swapped.  Having just performed one
2184                64-bit bswap, we now need to swap the writebacks.  */
2185             if ((da->memop & MO_BSWAP) == MO_TE) {
2186                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2187             } else {
2188                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2189             }
2190         }
2191         break;
2192 
2193     case GET_ASI_CODE:
2194 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2195         {
2196             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2197             TCGv_i64 tmp = tcg_temp_new_i64();
2198 
2199             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2200 
2201             /* See above.  */
2202             if ((da->memop & MO_BSWAP) == MO_TE) {
2203                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2204             } else {
2205                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2206             }
2207         }
2208         break;
2209 #else
2210         g_assert_not_reached();
2211 #endif
2212 
2213     default:
2214         /* ??? In theory we've handled all of the ASIs that are valid
2215            for ldda, and this should raise DAE_invalid_asi.  However,
2216            real hardware allows others.  This can be seen with e.g.
2217            FreeBSD 10.3 wrt ASI_IC_TAG.  */
2218         {
2219             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2220             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2221             TCGv_i64 tmp = tcg_temp_new_i64();
2222 
2223             save_state(dc);
2224             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2225 
2226             /* See above.  */
2227             if ((da->memop & MO_BSWAP) == MO_TE) {
2228                 tcg_gen_extr_i64_tl(lo, hi, tmp);
2229             } else {
2230                 tcg_gen_extr_i64_tl(hi, lo, tmp);
2231             }
2232         }
2233         break;
2234     }
2235 
2236     gen_store_gpr(dc, rd, hi);
2237     gen_store_gpr(dc, rd + 1, lo);
2238 }
2239 
2240 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2241 {
2242     TCGv hi = gen_load_gpr(dc, rd);
2243     TCGv lo = gen_load_gpr(dc, rd + 1);
2244 
2245     switch (da->type) {
2246     case GET_ASI_EXCP:
2247         break;
2248 
2249     case GET_ASI_DTWINX:
2250 #ifdef TARGET_SPARC64
2251         {
2252             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2253             TCGv_i128 t = tcg_temp_new_i128();
2254 
2255             /*
2256              * Note that LE twinx acts as if each 64-bit register result is
2257              * byte swapped.  We perform one 128-bit LE store, so must swap
2258              * the order of the construction.
2259              */
2260             if ((mop & MO_BSWAP) == MO_TE) {
2261                 tcg_gen_concat_i64_i128(t, lo, hi);
2262             } else {
2263                 tcg_gen_concat_i64_i128(t, hi, lo);
2264             }
2265             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2266         }
2267         break;
2268 #else
2269         g_assert_not_reached();
2270 #endif
2271 
2272     case GET_ASI_DIRECT:
2273         {
2274             TCGv_i64 t64 = tcg_temp_new_i64();
2275 
2276             /* Note that LE stda acts as if each 32-bit register result is
2277                byte swapped.  We will perform one 64-bit LE store, so now
2278                we must swap the order of the construction.  */
2279             if ((da->memop & MO_BSWAP) == MO_TE) {
2280                 tcg_gen_concat_tl_i64(t64, lo, hi);
2281             } else {
2282                 tcg_gen_concat_tl_i64(t64, hi, lo);
2283             }
2284             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2285         }
2286         break;
2287 
2288     case GET_ASI_BFILL:
2289         assert(TARGET_LONG_BITS == 32);
2290         /*
2291          * Store 32 bytes of [rd:rd+1] to ADDR.
2292          * See comments for GET_ASI_BCOPY above.
2293          */
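             /*
              * The 8-byte rd:rd+1 value is thus replicated four times
              * across the aligned 32-byte line.
              */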
2294         {
2295             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2296             TCGv_i64 t8 = tcg_temp_new_i64();
2297             TCGv_i128 t16 = tcg_temp_new_i128();
2298             TCGv daddr = tcg_temp_new();
2299 
2300             tcg_gen_concat_tl_i64(t8, lo, hi);
2301             tcg_gen_concat_i64_i128(t16, t8, t8);
2302             tcg_gen_andi_tl(daddr, addr, -32);
2303             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2304             tcg_gen_addi_tl(daddr, daddr, 16);
2305             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2306         }
2307         break;
2308 
2309     default:
2310         /* ??? In theory we've handled all of the ASIs that are valid
2311            for stda, and this should raise DAE_invalid_asi.  */
2312         {
2313             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2314             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2315             TCGv_i64 t64 = tcg_temp_new_i64();
2316 
2317             /* See above.  */
2318             if ((da->memop & MO_BSWAP) == MO_TE) {
2319                 tcg_gen_concat_tl_i64(t64, lo, hi);
2320             } else {
2321                 tcg_gen_concat_tl_i64(t64, hi, lo);
2322             }
2323 
2324             save_state(dc);
2325             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2326         }
2327         break;
2328     }
2329 }
2330 
2331 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2332 {
2333 #ifdef TARGET_SPARC64
2334     TCGv_i32 c32, zero, dst, s1, s2;
2335     TCGv_i64 c64 = tcg_temp_new_i64();
2336 
2337     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2338        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2339        the latter.  */
2340     c32 = tcg_temp_new_i32();
2341     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2342     tcg_gen_extrl_i64_i32(c32, c64);
2343 
2344     s1 = gen_load_fpr_F(dc, rs);
2345     s2 = gen_load_fpr_F(dc, rd);
2346     dst = tcg_temp_new_i32();
2347     zero = tcg_constant_i32(0);
2348 
2349     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2350 
2351     gen_store_fpr_F(dc, rd, dst);
2352 #else
2353     qemu_build_not_reached();
2354 #endif
2355 }
2356 
2357 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2358 {
2359 #ifdef TARGET_SPARC64
2360     TCGv_i64 dst = tcg_temp_new_i64();
2361     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2362                         gen_load_fpr_D(dc, rs),
2363                         gen_load_fpr_D(dc, rd));
2364     gen_store_fpr_D(dc, rd, dst);
2365 #else
2366     qemu_build_not_reached();
2367 #endif
2368 }
2369 
2370 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2371 {
2372 #ifdef TARGET_SPARC64
2373     TCGv c2 = tcg_constant_tl(cmp->c2);
2374     TCGv_i64 h = tcg_temp_new_i64();
2375     TCGv_i64 l = tcg_temp_new_i64();
2376 
2377     tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2378                         gen_load_fpr_D(dc, rs),
2379                         gen_load_fpr_D(dc, rd));
2380     tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2381                         gen_load_fpr_D(dc, rs + 2),
2382                         gen_load_fpr_D(dc, rd + 2));
2383     gen_store_fpr_D(dc, rd, h);
2384     gen_store_fpr_D(dc, rd + 2, l);
2385 #else
2386     qemu_build_not_reached();
2387 #endif
2388 }
2389 
2390 #ifdef TARGET_SPARC64
2391 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2392 {
2393     TCGv_i32 r_tl = tcg_temp_new_i32();
2394 
2395     /* load env->tl into r_tl */
2396     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2397 
2398     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 minus 1 */
2399     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2400 
2401     /* calculate offset to current trap state from env->ts, reuse r_tl */
2402     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2403     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2404 
2405     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2406     {
2407         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2408         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2409         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2410     }
2411 }
2412 #endif
2413 
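     /*
      * SPARC64 makes the upper half of the float register file reachable
      * by reusing bit 0 of the 5-bit field as bit 5 of the register
      * number, e.g. a field value of 1 names %f32.
      */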
2414 static int extract_dfpreg(DisasContext *dc, int x)
2415 {
2416     int r = x & 0x1e;
2417 #ifdef TARGET_SPARC64
2418     r |= (x & 1) << 5;
2419 #endif
2420     return r;
2421 }
2422 
2423 static int extract_qfpreg(DisasContext *dc, int x)
2424 {
2425     int r = x & 0x1c;
2426 #ifdef TARGET_SPARC64
2427     r |= (x & 1) << 5;
2428 #endif
2429     return r;
2430 }
2431 
2432 /* Include the auto-generated decoder.  */
2433 #include "decode-insns.c.inc"
2434 
2435 #define TRANS(NAME, AVAIL, FUNC, ...) \
2436     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2437     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
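     /*
      * For example, TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
      * defines trans_RDCCR(), which rejects the insn on 32-bit cpus and
      * otherwise forwards to do_rd_special().
      */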
2438 
2439 #define avail_ALL(C)      true
2440 #ifdef TARGET_SPARC64
2441 # define avail_32(C)      false
2442 # define avail_ASR17(C)   false
2443 # define avail_CASA(C)    true
2444 # define avail_DIV(C)     true
2445 # define avail_MUL(C)     true
2446 # define avail_POWERDOWN(C) false
2447 # define avail_64(C)      true
2448 # define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
2449 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2450 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2451 # define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
2452 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2453 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2454 # define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
2455 # define avail_VIS3B(C)   avail_VIS3(C)
2456 # define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
2457 #else
2458 # define avail_32(C)      true
2459 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2460 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2461 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2462 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2463 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2464 # define avail_64(C)      false
2465 # define avail_FMAF(C)    false
2466 # define avail_GL(C)      false
2467 # define avail_HYPV(C)    false
2468 # define avail_IMA(C)     false
2469 # define avail_VIS1(C)    false
2470 # define avail_VIS2(C)    false
2471 # define avail_VIS3(C)    false
2472 # define avail_VIS3B(C)   false
2473 # define avail_VIS4(C)    false
2474 #endif
2475 
2476 /* Default case for non-jump instructions. */
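     /*
      * SPARC exposes a pair of program counters: PC for the current insn
      * and NPC for the next.  Advancing normally means pc = npc and
      * npc += 4, unless NPC holds one of the dynamic markers below.
      */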
2477 static bool advance_pc(DisasContext *dc)
2478 {
2479     TCGLabel *l1;
2480 
2481     finishing_insn(dc);
2482 
2483     if (dc->npc & 3) {
2484         switch (dc->npc) {
2485         case DYNAMIC_PC:
2486         case DYNAMIC_PC_LOOKUP:
2487             dc->pc = dc->npc;
2488             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2489             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2490             break;
2491 
2492         case JUMP_PC:
2493             /* we can do a static jump */
2494             l1 = gen_new_label();
2495             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2496 
2497             /* jump not taken */
2498             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2499 
2500             /* jump taken */
2501             gen_set_label(l1);
2502             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2503 
2504             dc->base.is_jmp = DISAS_NORETURN;
2505             break;
2506 
2507         default:
2508             g_assert_not_reached();
2509         }
2510     } else {
2511         dc->pc = dc->npc;
2512         dc->npc = dc->npc + 4;
2513     }
2514     return true;
2515 }
2516 
2517 /*
2518  * Major opcodes 00 and 01 -- branches, call, and sethi
2519  */
2520 
2521 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2522                               bool annul, int disp)
2523 {
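         /*
          * The annul bit controls the delay slot: a conditional branch
          * annuls the slot only when not taken, while an always/never
          * branch with annul set skips the slot outright.
          */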
2524     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2525     target_ulong npc;
2526 
2527     finishing_insn(dc);
2528 
2529     if (cmp->cond == TCG_COND_ALWAYS) {
2530         if (annul) {
2531             dc->pc = dest;
2532             dc->npc = dest + 4;
2533         } else {
2534             gen_mov_pc_npc(dc);
2535             dc->npc = dest;
2536         }
2537         return true;
2538     }
2539 
2540     if (cmp->cond == TCG_COND_NEVER) {
2541         npc = dc->npc;
2542         if (npc & 3) {
2543             gen_mov_pc_npc(dc);
2544             if (annul) {
2545                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2546             }
2547             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2548         } else {
2549             dc->pc = npc + (annul ? 4 : 0);
2550             dc->npc = dc->pc + 4;
2551         }
2552         return true;
2553     }
2554 
2555     flush_cond(dc);
2556     npc = dc->npc;
2557 
2558     if (annul) {
2559         TCGLabel *l1 = gen_new_label();
2560 
2561         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2562         gen_goto_tb(dc, 0, npc, dest);
2563         gen_set_label(l1);
2564         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2565 
2566         dc->base.is_jmp = DISAS_NORETURN;
2567     } else {
2568         if (npc & 3) {
2569             switch (npc) {
2570             case DYNAMIC_PC:
2571             case DYNAMIC_PC_LOOKUP:
2572                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2573                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2574                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2575                                    cmp->c1, tcg_constant_tl(cmp->c2),
2576                                    tcg_constant_tl(dest), cpu_npc);
2577                 dc->pc = npc;
2578                 break;
2579             default:
2580                 g_assert_not_reached();
2581             }
2582         } else {
2583             dc->pc = npc;
2584             dc->npc = JUMP_PC;
2585             dc->jump = *cmp;
2586             dc->jump_pc[0] = dest;
2587             dc->jump_pc[1] = npc + 4;
2588 
2589             /* The condition for cpu_cond is always NE -- normalize. */
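                  /* I.e. cpu_cond becomes nonzero exactly when the branch is taken. */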
2590             if (cmp->cond == TCG_COND_NE) {
2591                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2592             } else {
2593                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2594             }
2595             dc->cpu_cond_live = true;
2596         }
2597     }
2598     return true;
2599 }
2600 
2601 static bool raise_priv(DisasContext *dc)
2602 {
2603     gen_exception(dc, TT_PRIV_INSN);
2604     return true;
2605 }
2606 
2607 static bool raise_unimpfpop(DisasContext *dc)
2608 {
2609     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2610     return true;
2611 }
2612 
2613 static bool gen_trap_float128(DisasContext *dc)
2614 {
2615     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2616         return false;
2617     }
2618     return raise_unimpfpop(dc);
2619 }
2620 
2621 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2622 {
2623     DisasCompare cmp;
2624 
2625     gen_compare(&cmp, a->cc, a->cond, dc);
2626     return advance_jump_cond(dc, &cmp, a->a, a->i);
2627 }
2628 
2629 TRANS(Bicc, ALL, do_bpcc, a)
2630 TRANS(BPcc,  64, do_bpcc, a)
2631 
2632 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2633 {
2634     DisasCompare cmp;
2635 
2636     if (gen_trap_ifnofpu(dc)) {
2637         return true;
2638     }
2639     gen_fcompare(&cmp, a->cc, a->cond);
2640     return advance_jump_cond(dc, &cmp, a->a, a->i);
2641 }
2642 
2643 TRANS(FBPfcc,  64, do_fbpfcc, a)
2644 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2645 
2646 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2647 {
2648     DisasCompare cmp;
2649 
2650     if (!avail_64(dc)) {
2651         return false;
2652     }
2653     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2654         return false;
2655     }
2656     return advance_jump_cond(dc, &cmp, a->a, a->i);
2657 }
2658 
2659 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2660 {
2661     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2662 
2663     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2664     gen_mov_pc_npc(dc);
2665     dc->npc = target;
2666     return true;
2667 }
2668 
2669 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2670 {
2671     /*
2672      * For sparc32, always generate the no-coprocessor exception.
2673      * For sparc64, always generate illegal instruction.
2674      */
2675 #ifdef TARGET_SPARC64
2676     return false;
2677 #else
2678     gen_exception(dc, TT_NCP_INSN);
2679     return true;
2680 #endif
2681 }
2682 
2683 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2684 {
2685     /* Special-case %g0 because that's the canonical nop.  */
2686     if (a->rd) {
2687         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2688     }
2689     return advance_pc(dc);
2690 }
2691 
2692 /*
2693  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2694  */
2695 
2696 static bool do_tcc(DisasContext *dc, int cond, int cc,
2697                    int rs1, bool imm, int rs2_or_imm)
2698 {
2699     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2700                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2701     DisasCompare cmp;
2702     TCGLabel *lab;
2703     TCGv_i32 trap;
2704 
2705     /* Trap never.  */
2706     if (cond == 0) {
2707         return advance_pc(dc);
2708     }
2709 
2710     /*
2711      * Immediate traps are the most common case.  Since this value is
2712      * live across the branch, it really pays to evaluate the constant.
2713      */
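         /* E.g. "ta 0x10" produces the constant trap number TT_TRAP + 0x10. */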
2714     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2715         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2716     } else {
2717         trap = tcg_temp_new_i32();
2718         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2719         if (imm) {
2720             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2721         } else {
2722             TCGv_i32 t2 = tcg_temp_new_i32();
2723             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2724             tcg_gen_add_i32(trap, trap, t2);
2725         }
2726         tcg_gen_andi_i32(trap, trap, mask);
2727         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2728     }
2729 
2730     finishing_insn(dc);
2731 
2732     /* Trap always.  */
2733     if (cond == 8) {
2734         save_state(dc);
2735         gen_helper_raise_exception(tcg_env, trap);
2736         dc->base.is_jmp = DISAS_NORETURN;
2737         return true;
2738     }
2739 
2740     /* Conditional trap.  */
2741     flush_cond(dc);
2742     lab = delay_exceptionv(dc, trap);
2743     gen_compare(&cmp, cc, cond, dc);
2744     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2745 
2746     return advance_pc(dc);
2747 }
2748 
2749 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2750 {
2751     if (avail_32(dc) && a->cc) {
2752         return false;
2753     }
2754     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2755 }
2756 
2757 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2758 {
2759     if (avail_64(dc)) {
2760         return false;
2761     }
2762     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2763 }
2764 
2765 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2766 {
2767     if (avail_32(dc)) {
2768         return false;
2769     }
2770     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2771 }
2772 
2773 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2774 {
2775     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2776     return advance_pc(dc);
2777 }
2778 
2779 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2780 {
2781     if (avail_32(dc)) {
2782         return false;
2783     }
2784     if (a->mmask) {
2785         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2786         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2787     }
2788     if (a->cmask) {
2789         /* For #Sync, etc, end the TB to recognize interrupts. */
2790         dc->base.is_jmp = DISAS_EXIT;
2791     }
2792     return advance_pc(dc);
2793 }
2794 
2795 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2796                           TCGv (*func)(DisasContext *, TCGv))
2797 {
2798     if (!priv) {
2799         return raise_priv(dc);
2800     }
2801     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2802     return advance_pc(dc);
2803 }
2804 
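     /* Readers may fill in DST or return another live TCGv, e.g. cpu_y below. */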
2805 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2806 {
2807     return cpu_y;
2808 }
2809 
2810 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2811 {
2812     /*
2813      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2814      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2815      * This pattern matches after all other ASRs, so Leon3 %asr17 is handled first.
2816      */
2817     if (avail_64(dc) && a->rs1 != 0) {
2818         return false;
2819     }
2820     return do_rd_special(dc, true, a->rd, do_rdy);
2821 }
2822 
2823 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2824 {
2825     gen_helper_rdasr17(dst, tcg_env);
2826     return dst;
2827 }
2828 
2829 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2830 
2831 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2832 {
2833     gen_helper_rdccr(dst, tcg_env);
2834     return dst;
2835 }
2836 
2837 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2838 
2839 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2840 {
2841 #ifdef TARGET_SPARC64
2842     return tcg_constant_tl(dc->asi);
2843 #else
2844     qemu_build_not_reached();
2845 #endif
2846 }
2847 
2848 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2849 
2850 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2851 {
2852     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2853 
2854     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2855     if (translator_io_start(&dc->base)) {
2856         dc->base.is_jmp = DISAS_EXIT;
2857     }
2858     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2859                               tcg_constant_i32(dc->mem_idx));
2860     return dst;
2861 }
2862 
2863 /* TODO: non-priv access only allowed when enabled. */
2864 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2865 
2866 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2867 {
2868     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2869 }
2870 
2871 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2872 
2873 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2874 {
2875     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2876     return dst;
2877 }
2878 
2879 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2880 
2881 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2882 {
2883     gen_trap_ifnofpu(dc);
2884     return cpu_gsr;
2885 }
2886 
2887 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2888 
2889 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2890 {
2891     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2892     return dst;
2893 }
2894 
2895 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2896 
2897 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2898 {
2899     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2900     return dst;
2901 }
2902 
2903 /* TODO: non-priv access only allowed when enabled. */
2904 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2905 
2906 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2907 {
2908     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2909 
2910     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2911     if (translator_io_start(&dc->base)) {
2912         dc->base.is_jmp = DISAS_EXIT;
2913     }
2914     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2915                               tcg_constant_i32(dc->mem_idx));
2916     return dst;
2917 }
2918 
2919 /* TODO: non-priv access only allowed when enabled. */
2920 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2921 
2922 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2923 {
2924     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2925     return dst;
2926 }
2927 
2928 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2929 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2930 
2931 /*
2932  * UltraSPARC-T1 Strand status.
2933  * The HYPV check may not be enough: UA2005 & UA2007 describe
2934  * this ASR as implementation dependent.
2935  */
2936 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2937 {
2938     return tcg_constant_tl(1);
2939 }
2940 
2941 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2942 
2943 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2944 {
2945     gen_helper_rdpsr(dst, tcg_env);
2946     return dst;
2947 }
2948 
2949 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2950 
2951 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2952 {
2953     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2954     return dst;
2955 }
2956 
2957 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2958 
2959 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2960 {
2961     TCGv_i32 tl = tcg_temp_new_i32();
2962     TCGv_ptr tp = tcg_temp_new_ptr();
2963 
2964     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2965     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2966     tcg_gen_shli_i32(tl, tl, 3);
2967     tcg_gen_ext_i32_ptr(tp, tl);
2968     tcg_gen_add_ptr(tp, tp, tcg_env);
2969 
2970     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2971     return dst;
2972 }
2973 
2974 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2975 
2976 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2977 {
2978     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2979     return dst;
2980 }
2981 
2982 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2983 
2984 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2985 {
2986     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2987     return dst;
2988 }
2989 
2990 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2991 
2992 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2993 {
2994     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2995     return dst;
2996 }
2997 
2998 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2999 
3000 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3001 {
3002     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3003     return dst;
3004 }
3005 
3006 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3007       do_rdhstick_cmpr)
3008 
3009 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3010 {
3011     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3012     return dst;
3013 }
3014 
3015 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3016 
3017 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3018 {
3019 #ifdef TARGET_SPARC64
3020     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3021 
3022     gen_load_trap_state_at_tl(r_tsptr);
3023     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3024     return dst;
3025 #else
3026     qemu_build_not_reached();
3027 #endif
3028 }
3029 
3030 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3031 
3032 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3033 {
3034 #ifdef TARGET_SPARC64
3035     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3036 
3037     gen_load_trap_state_at_tl(r_tsptr);
3038     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3039     return dst;
3040 #else
3041     qemu_build_not_reached();
3042 #endif
3043 }
3044 
3045 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3046 
3047 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3048 {
3049 #ifdef TARGET_SPARC64
3050     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3051 
3052     gen_load_trap_state_at_tl(r_tsptr);
3053     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3054     return dst;
3055 #else
3056     qemu_build_not_reached();
3057 #endif
3058 }
3059 
3060 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3061 
3062 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3063 {
3064 #ifdef TARGET_SPARC64
3065     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3066 
3067     gen_load_trap_state_at_tl(r_tsptr);
3068     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3069     return dst;
3070 #else
3071     qemu_build_not_reached();
3072 #endif
3073 }
3074 
3075 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3076 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3077 
3078 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3079 {
3080     return cpu_tbr;
3081 }
3082 
3083 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3084 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3085 
3086 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3087 {
3088     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3089     return dst;
3090 }
3091 
3092 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3093 
3094 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3095 {
3096     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3097     return dst;
3098 }
3099 
3100 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3101 
3102 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3103 {
3104     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3105     return dst;
3106 }
3107 
3108 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3109 
3110 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3111 {
3112     gen_helper_rdcwp(dst, tcg_env);
3113     return dst;
3114 }
3115 
3116 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3117 
3118 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3119 {
3120     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3121     return dst;
3122 }
3123 
3124 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3125 
3126 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3127 {
3128     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3129     return dst;
3130 }
3131 
3132 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3133       do_rdcanrestore)
3134 
3135 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3136 {
3137     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3138     return dst;
3139 }
3140 
3141 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3142 
3143 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3144 {
3145     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3146     return dst;
3147 }
3148 
3149 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3150 
3151 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3152 {
3153     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3154     return dst;
3155 }
3156 
3157 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3158 
3159 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3160 {
3161     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3162     return dst;
3163 }
3164 
3165 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3166 
3167 /* UA2005 strand status */
3168 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3169 {
3170     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3171     return dst;
3172 }
3173 
3174 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3175 
3176 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3177 {
3178     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3179     return dst;
3180 }
3181 
3182 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3183 
3184 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3185 {
3186     if (avail_64(dc)) {
3187         gen_helper_flushw(tcg_env);
3188         return advance_pc(dc);
3189     }
3190     return false;
3191 }
3192 
3193 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3194                           void (*func)(DisasContext *, TCGv))
3195 {
3196     TCGv src;
3197 
3198     /* For simplicity, we under-decoded the rs2 form. */
3199     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
3200         return false;
3201     }
3202     if (!priv) {
3203         return raise_priv(dc);
3204     }
3205 
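         /* Per the architecture, these writes store rs1 XOR (rs2 or imm). */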
3206     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3207         src = tcg_constant_tl(a->rs2_or_imm);
3208     } else {
3209         TCGv src1 = gen_load_gpr(dc, a->rs1);
3210         if (a->rs2_or_imm == 0) {
3211             src = src1;
3212         } else {
3213             src = tcg_temp_new();
3214             if (a->imm) {
3215                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3216             } else {
3217                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3218             }
3219         }
3220     }
3221     func(dc, src);
3222     return advance_pc(dc);
3223 }
3224 
3225 static void do_wry(DisasContext *dc, TCGv src)
3226 {
3227     tcg_gen_ext32u_tl(cpu_y, src);
3228 }
3229 
3230 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3231 
3232 static void do_wrccr(DisasContext *dc, TCGv src)
3233 {
3234     gen_helper_wrccr(tcg_env, src);
3235 }
3236 
3237 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3238 
3239 static void do_wrasi(DisasContext *dc, TCGv src)
3240 {
3241     TCGv tmp = tcg_temp_new();
3242 
3243     tcg_gen_ext8u_tl(tmp, src);
3244     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3245     /* End TB to notice changed ASI. */
3246     dc->base.is_jmp = DISAS_EXIT;
3247 }
3248 
3249 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3250 
3251 static void do_wrfprs(DisasContext *dc, TCGv src)
3252 {
3253 #ifdef TARGET_SPARC64
3254     tcg_gen_trunc_tl_i32(cpu_fprs, src);
3255     dc->fprs_dirty = 0;
3256     dc->base.is_jmp = DISAS_EXIT;
3257 #else
3258     qemu_build_not_reached();
3259 #endif
3260 }
3261 
3262 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3263 
3264 static void do_wrgsr(DisasContext *dc, TCGv src)
3265 {
3266     gen_trap_ifnofpu(dc);
3267     tcg_gen_mov_tl(cpu_gsr, src);
3268 }
3269 
3270 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3271 
3272 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3273 {
3274     gen_helper_set_softint(tcg_env, src);
3275 }
3276 
3277 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3278 
3279 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3280 {
3281     gen_helper_clear_softint(tcg_env, src);
3282 }
3283 
3284 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3285 
3286 static void do_wrsoftint(DisasContext *dc, TCGv src)
3287 {
3288     gen_helper_write_softint(tcg_env, src);
3289 }
3290 
3291 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3292 
3293 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3294 {
3295     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3296 
3297     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3298     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3299     translator_io_start(&dc->base);
3300     gen_helper_tick_set_limit(r_tickptr, src);
3301     /* End TB to handle timer interrupt */
3302     dc->base.is_jmp = DISAS_EXIT;
3303 }
3304 
3305 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3306 
3307 static void do_wrstick(DisasContext *dc, TCGv src)
3308 {
3309 #ifdef TARGET_SPARC64
3310     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3311 
3312     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3313     translator_io_start(&dc->base);
3314     gen_helper_tick_set_count(r_tickptr, src);
3315     /* End TB to handle timer interrupt */
3316     dc->base.is_jmp = DISAS_EXIT;
3317 #else
3318     qemu_build_not_reached();
3319 #endif
3320 }
3321 
3322 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3323 
3324 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3325 {
3326     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3327 
3328     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3329     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3330     translator_io_start(&dc->base);
3331     gen_helper_tick_set_limit(r_tickptr, src);
3332     /* End TB to handle timer interrupt */
3333     dc->base.is_jmp = DISAS_EXIT;
3334 }
3335 
3336 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3337 
3338 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3339 {
3340     finishing_insn(dc);
3341     save_state(dc);
3342     gen_helper_power_down(tcg_env);
3343 }
3344 
3345 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3346 
3347 static void do_wrmwait(DisasContext *dc, TCGv src)
3348 {
3349     /*
3350      * TODO: This is a stub version of mwait, which merely recognizes
3351      * interrupts immediately and does not wait.
3352      */
3353     dc->base.is_jmp = DISAS_EXIT;
3354 }
3355 
3356 TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3357 
3358 static void do_wrpsr(DisasContext *dc, TCGv src)
3359 {
3360     gen_helper_wrpsr(tcg_env, src);
3361     dc->base.is_jmp = DISAS_EXIT;
3362 }
3363 
3364 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3365 
3366 static void do_wrwim(DisasContext *dc, TCGv src)
3367 {
3368     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3369     TCGv tmp = tcg_temp_new();
3370 
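         /* Only one WIM bit per implemented window is writable,
            e.g. nwindows = 8 keeps just bits 7:0. */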
3371     tcg_gen_andi_tl(tmp, src, mask);
3372     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3373 }
3374 
3375 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3376 
3377 static void do_wrtpc(DisasContext *dc, TCGv src)
3378 {
3379 #ifdef TARGET_SPARC64
3380     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3381 
3382     gen_load_trap_state_at_tl(r_tsptr);
3383     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3384 #else
3385     qemu_build_not_reached();
3386 #endif
3387 }
3388 
3389 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3390 
3391 static void do_wrtnpc(DisasContext *dc, TCGv src)
3392 {
3393 #ifdef TARGET_SPARC64
3394     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3395 
3396     gen_load_trap_state_at_tl(r_tsptr);
3397     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3398 #else
3399     qemu_build_not_reached();
3400 #endif
3401 }
3402 
3403 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3404 
3405 static void do_wrtstate(DisasContext *dc, TCGv src)
3406 {
3407 #ifdef TARGET_SPARC64
3408     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3409 
3410     gen_load_trap_state_at_tl(r_tsptr);
3411     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3412 #else
3413     qemu_build_not_reached();
3414 #endif
3415 }
3416 
3417 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3418 
3419 static void do_wrtt(DisasContext *dc, TCGv src)
3420 {
3421 #ifdef TARGET_SPARC64
3422     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3423 
3424     gen_load_trap_state_at_tl(r_tsptr);
3425     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3426 #else
3427     qemu_build_not_reached();
3428 #endif
3429 }
3430 
3431 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3432 
3433 static void do_wrtick(DisasContext *dc, TCGv src)
3434 {
3435     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3436 
3437     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3438     translator_io_start(&dc->base);
3439     gen_helper_tick_set_count(r_tickptr, src);
3440     /* End TB to handle timer interrupt */
3441     dc->base.is_jmp = DISAS_EXIT;
3442 }
3443 
3444 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3445 
3446 static void do_wrtba(DisasContext *dc, TCGv src)
3447 {
3448     tcg_gen_mov_tl(cpu_tbr, src);
3449 }
3450 
3451 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3452 
3453 static void do_wrpstate(DisasContext *dc, TCGv src)
3454 {
3455     save_state(dc);
3456     if (translator_io_start(&dc->base)) {
3457         dc->base.is_jmp = DISAS_EXIT;
3458     }
3459     gen_helper_wrpstate(tcg_env, src);
3460     dc->npc = DYNAMIC_PC;
3461 }
3462 
3463 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3464 
3465 static void do_wrtl(DisasContext *dc, TCGv src)
3466 {
3467     save_state(dc);
3468     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3469     dc->npc = DYNAMIC_PC;
3470 }
3471 
3472 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3473 
3474 static void do_wrpil(DisasContext *dc, TCGv src)
3475 {
3476     if (translator_io_start(&dc->base)) {
3477         dc->base.is_jmp = DISAS_EXIT;
3478     }
3479     gen_helper_wrpil(tcg_env, src);
3480 }
3481 
3482 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3483 
3484 static void do_wrcwp(DisasContext *dc, TCGv src)
3485 {
3486     gen_helper_wrcwp(tcg_env, src);
3487 }
3488 
3489 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3490 
3491 static void do_wrcansave(DisasContext *dc, TCGv src)
3492 {
3493     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3494 }
3495 
3496 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3497 
3498 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3499 {
3500     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3501 }
3502 
3503 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3504 
3505 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3506 {
3507     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3508 }
3509 
3510 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3511 
3512 static void do_wrotherwin(DisasContext *dc, TCGv src)
3513 {
3514     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3515 }
3516 
3517 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3518 
3519 static void do_wrwstate(DisasContext *dc, TCGv src)
3520 {
3521     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3522 }
3523 
3524 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3525 
3526 static void do_wrgl(DisasContext *dc, TCGv src)
3527 {
3528     gen_helper_wrgl(tcg_env, src);
3529 }
3530 
3531 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3532 
3533 /* UA2005 strand status */
3534 static void do_wrssr(DisasContext *dc, TCGv src)
3535 {
3536     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3537 }
3538 
3539 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3540 
3541 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3542 
3543 static void do_wrhpstate(DisasContext *dc, TCGv src)
3544 {
3545     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3546     dc->base.is_jmp = DISAS_EXIT;
3547 }
3548 
3549 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3550 
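     /*
      * Write HTSTATE[TL].  The address is computed by hand:
      * tp = env + (tl & MAXTL_MASK) * sizeof(uint64_t), and the final
      * store then adds the offset of the htstate array, so the value
      * lands in env->htstate[tl].
      */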
3551 static void do_wrhtstate(DisasContext *dc, TCGv src)
3552 {
3553     TCGv_i32 tl = tcg_temp_new_i32();
3554     TCGv_ptr tp = tcg_temp_new_ptr();
3555 
3556     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3557     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3558     tcg_gen_shli_i32(tl, tl, 3);
3559     tcg_gen_ext_i32_ptr(tp, tl);
3560     tcg_gen_add_ptr(tp, tp, tcg_env);
3561 
3562     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3563 }
3564 
3565 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3566 
3567 static void do_wrhintp(DisasContext *dc, TCGv src)
3568 {
3569     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3570 }
3571 
3572 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3573 
3574 static void do_wrhtba(DisasContext *dc, TCGv src)
3575 {
3576     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3577 }
3578 
3579 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3580 
3581 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3582 {
3583     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3584 
3585     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3586     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3587     translator_io_start(&dc->base);
3588     gen_helper_tick_set_limit(r_tickptr, src);
3589     /* End TB to handle timer interrupt */
3590     dc->base.is_jmp = DISAS_EXIT;
3591 }
3592 
3593 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3594       do_wrhstick_cmpr)
3595 
3596 static bool do_saved_restored(DisasContext *dc, bool saved)
3597 {
3598     if (!supervisor(dc)) {
3599         return raise_priv(dc);
3600     }
3601     if (saved) {
3602         gen_helper_saved(tcg_env);
3603     } else {
3604         gen_helper_restored(tcg_env);
3605     }
3606     return advance_pc(dc);
3607 }
3608 
3609 TRANS(SAVED, 64, do_saved_restored, true)
3610 TRANS(RESTORED, 64, do_saved_restored, false)
3611 
3612 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3613 {
3614     return advance_pc(dc);
3615 }
3616 
3617 /*
3618  * TODO: Need a feature bit for sparcv8.
3619  * In the meantime, treat all 32-bit cpus like sparcv7.
3620  */
3621 TRANS(NOP_v7, 32, trans_NOP, a)
3622 TRANS(NOP_v9, 64, trans_NOP, a)
3623 
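     /*
      * Common worker for the arithmetic/logical r_r_ri_cc patterns.
      * For logic ops that set the condition codes, the result is
      * computed directly into cpu_cc_N; Z then mirrors the result and
      * C and V are cleared, so e.g. ANDcc needs no separate flags
      * callback.  (On sparc64 the 32-bit icc copies are updated too.)
      */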
3624 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3625                          void (*func)(TCGv, TCGv, TCGv),
3626                          void (*funci)(TCGv, TCGv, target_long),
3627                          bool logic_cc)
3628 {
3629     TCGv dst, src1;
3630 
3631     /* For simplicity, we under-decoded the rs2 form. */
3632     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3633         return false;
3634     }
3635 
3636     if (logic_cc) {
3637         dst = cpu_cc_N;
3638     } else {
3639         dst = gen_dest_gpr(dc, a->rd);
3640     }
3641     src1 = gen_load_gpr(dc, a->rs1);
3642 
3643     if (a->imm || a->rs2_or_imm == 0) {
3644         if (funci) {
3645             funci(dst, src1, a->rs2_or_imm);
3646         } else {
3647             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3648         }
3649     } else {
3650         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3651     }
3652 
3653     if (logic_cc) {
3654         if (TARGET_LONG_BITS == 64) {
3655             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3656             tcg_gen_movi_tl(cpu_icc_C, 0);
3657         }
3658         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3659         tcg_gen_movi_tl(cpu_cc_C, 0);
3660         tcg_gen_movi_tl(cpu_cc_V, 0);
3661     }
3662 
3663     gen_store_gpr(dc, a->rd, dst);
3664     return advance_pc(dc);
3665 }
3666 
3667 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3668                      void (*func)(TCGv, TCGv, TCGv),
3669                      void (*funci)(TCGv, TCGv, target_long),
3670                      void (*func_cc)(TCGv, TCGv, TCGv))
3671 {
3672     if (a->cc) {
3673         return do_arith_int(dc, a, func_cc, NULL, false);
3674     }
3675     return do_arith_int(dc, a, func, funci, false);
3676 }
3677 
3678 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3679                      void (*func)(TCGv, TCGv, TCGv),
3680                      void (*funci)(TCGv, TCGv, target_long))
3681 {
3682     return do_arith_int(dc, a, func, funci, a->cc);
3683 }
3684 
3685 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3686 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3687 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3688 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3689 
3690 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3691 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3692 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3693 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3694 
3695 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3696 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3697 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3698 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3699 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3700 
3701 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3702 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3703 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3704 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3705 
3706 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3707 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3708 
3709 /* TODO: Should have a feature bit -- comes in with UltraSPARC T2. */
3710 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3711 
3712 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3713 {
3714     /* OR with %g0 is the canonical alias for MOV. */
3715     if (!a->cc && a->rs1 == 0) {
3716         if (a->imm || a->rs2_or_imm == 0) {
3717             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3718         } else if (a->rs2_or_imm & ~0x1f) {
3719             /* For simplicity, we under-decoded the rs2 form. */
3720             return false;
3721         } else {
3722             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3723         }
3724         return advance_pc(dc);
3725     }
3726     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3727 }
3728 
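     /*
      * UDIV divides the 64-bit dividend Y:rs1 by the divisor
      * zero-extended from 32 bits, clamping the quotient to UINT32_MAX
      * on overflow.  A zero divisor raises TT_DIV_ZERO: immediately
      * when it is a constant, otherwise via a delayed-exception branch
      * on the register value.
      */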
3729 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3730 {
3731     TCGv_i64 t1, t2;
3732     TCGv dst;
3733 
3734     if (!avail_DIV(dc)) {
3735         return false;
3736     }
3737     /* For simplicity, we under-decoded the rs2 form. */
3738     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3739         return false;
3740     }
3741 
3742     if (unlikely(a->rs2_or_imm == 0)) {
3743         gen_exception(dc, TT_DIV_ZERO);
3744         return true;
3745     }
3746 
3747     if (a->imm) {
3748         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3749     } else {
3750         TCGLabel *lab;
3751         TCGv_i32 n2;
3752 
3753         finishing_insn(dc);
3754         flush_cond(dc);
3755 
3756         n2 = tcg_temp_new_i32();
3757         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3758 
3759         lab = delay_exception(dc, TT_DIV_ZERO);
3760         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3761 
3762         t2 = tcg_temp_new_i64();
3763 #ifdef TARGET_SPARC64
3764         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3765 #else
3766         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3767 #endif
3768     }
3769 
3770     t1 = tcg_temp_new_i64();
3771     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3772 
3773     tcg_gen_divu_i64(t1, t1, t2);
3774     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3775 
3776     dst = gen_dest_gpr(dc, a->rd);
3777     tcg_gen_trunc_i64_tl(dst, t1);
3778     gen_store_gpr(dc, a->rd, dst);
3779     return advance_pc(dc);
3780 }
3781 
3782 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3783 {
3784     TCGv dst, src1, src2;
3785 
3786     if (!avail_64(dc)) {
3787         return false;
3788     }
3789     /* For simplicity, we under-decoded the rs2 form. */
3790     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3791         return false;
3792     }
3793 
3794     if (unlikely(a->rs2_or_imm == 0)) {
3795         gen_exception(dc, TT_DIV_ZERO);
3796         return true;
3797     }
3798 
3799     if (a->imm) {
3800         src2 = tcg_constant_tl(a->rs2_or_imm);
3801     } else {
3802         TCGLabel *lab;
3803 
3804         finishing_insn(dc);
3805         flush_cond(dc);
3806 
3807         lab = delay_exception(dc, TT_DIV_ZERO);
3808         src2 = cpu_regs[a->rs2_or_imm];
3809         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3810     }
3811 
3812     dst = gen_dest_gpr(dc, a->rd);
3813     src1 = gen_load_gpr(dc, a->rs1);
3814 
3815     tcg_gen_divu_tl(dst, src1, src2);
3816     gen_store_gpr(dc, a->rd, dst);
3817     return advance_pc(dc);
3818 }
3819 
3820 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3821 {
3822     TCGv dst, src1, src2;
3823 
3824     if (!avail_64(dc)) {
3825         return false;
3826     }
3827     /* For simplicity, we under-decoded the rs2 form. */
3828     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3829         return false;
3830     }
3831 
3832     if (unlikely(a->rs2_or_imm == 0)) {
3833         gen_exception(dc, TT_DIV_ZERO);
3834         return true;
3835     }
3836 
3837     dst = gen_dest_gpr(dc, a->rd);
3838     src1 = gen_load_gpr(dc, a->rs1);
3839 
3840     if (a->imm) {
3841         if (unlikely(a->rs2_or_imm == -1)) {
3842             tcg_gen_neg_tl(dst, src1);
3843             gen_store_gpr(dc, a->rd, dst);
3844             return advance_pc(dc);
3845         }
3846         src2 = tcg_constant_tl(a->rs2_or_imm);
3847     } else {
3848         TCGLabel *lab;
3849         TCGv t1, t2;
3850 
3851         finishing_insn(dc);
3852         flush_cond(dc);
3853 
3854         lab = delay_exception(dc, TT_DIV_ZERO);
3855         src2 = cpu_regs[a->rs2_or_imm];
3856         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3857 
3858         /*
3859          * Need to avoid INT64_MIN / -1, which would trap on an x86 host.
3860          * Set SRC2 to 1 in that case; INT64_MIN / 1 is the correct result.
3861          */
3862         t1 = tcg_temp_new();
3863         t2 = tcg_temp_new();
3864         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3865         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3866         tcg_gen_and_tl(t1, t1, t2);
3867         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3868                            tcg_constant_tl(1), src2);
3869         src2 = t1;
3870     }
3871 
3872     tcg_gen_div_tl(dst, src1, src2);
3873     gen_store_gpr(dc, a->rd, dst);
3874     return advance_pc(dc);
3875 }
3876 
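     /*
      * EDGE instructions compute a partial-store byte mask for the
      * start and end of an unaligned block.  An illustrative case,
      * big-endian EDGE8 with (s1 & 7) == 2 and (s2 & 7) == 5 in the
      * same doubleword: l = 0xff >> 2 = 0x3f, r = (0xff << 2) & 0xff
      * = 0xfc, and dst = l & r = 0x3c, selecting bytes 2..5.  When
      * the two addresses fall in different doublewords, only the
      * left edge mask is returned.
      */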
3877 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3878                      int width, bool cc, bool little_endian)
3879 {
3880     TCGv dst, s1, s2, l, r, t, m;
3881     uint64_t amask = address_mask_i(dc, -8);
3882 
3883     dst = gen_dest_gpr(dc, a->rd);
3884     s1 = gen_load_gpr(dc, a->rs1);
3885     s2 = gen_load_gpr(dc, a->rs2);
3886 
3887     if (cc) {
3888         gen_op_subcc(cpu_cc_N, s1, s2);
3889     }
3890 
3891     l = tcg_temp_new();
3892     r = tcg_temp_new();
3893     t = tcg_temp_new();
3894 
3895     switch (width) {
3896     case 8:
3897         tcg_gen_andi_tl(l, s1, 7);
3898         tcg_gen_andi_tl(r, s2, 7);
3899         tcg_gen_xori_tl(r, r, 7);
3900         m = tcg_constant_tl(0xff);
3901         break;
3902     case 16:
3903         tcg_gen_extract_tl(l, s1, 1, 2);
3904         tcg_gen_extract_tl(r, s2, 1, 2);
3905         tcg_gen_xori_tl(r, r, 3);
3906         m = tcg_constant_tl(0xf);
3907         break;
3908     case 32:
3909         tcg_gen_extract_tl(l, s1, 2, 1);
3910         tcg_gen_extract_tl(r, s2, 2, 1);
3911         tcg_gen_xori_tl(r, r, 1);
3912         m = tcg_constant_tl(0x3);
3913         break;
3914     default:
3915         abort();
3916     }
3917 
3918     /* Compute Left Edge */
3919     if (little_endian) {
3920         tcg_gen_shl_tl(l, m, l);
3921         tcg_gen_and_tl(l, l, m);
3922     } else {
3923         tcg_gen_shr_tl(l, m, l);
3924     }
3925     /* Compute Right Edge */
3926     if (little_endian) {
3927         tcg_gen_shr_tl(r, m, r);
3928     } else {
3929         tcg_gen_shl_tl(r, m, r);
3930         tcg_gen_and_tl(r, r, m);
3931     }
3932 
3933     /* Compute dst = (s1 == s2 under amask ? l & r : l) */
3934     tcg_gen_xor_tl(t, s1, s2);
3935     tcg_gen_and_tl(r, r, l);
3936     tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);
3937 
3938     gen_store_gpr(dc, a->rd, dst);
3939     return advance_pc(dc);
3940 }
3941 
3942 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3943 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3944 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3945 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3946 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3947 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3948 
3949 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3950 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3951 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3952 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3953 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3954 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3955 
3956 static bool do_rr(DisasContext *dc, arg_r_r *a,
3957                   void (*func)(TCGv, TCGv))
3958 {
3959     TCGv dst = gen_dest_gpr(dc, a->rd);
3960     TCGv src = gen_load_gpr(dc, a->rs);
3961 
3962     func(dst, src);
3963     gen_store_gpr(dc, a->rd, dst);
3964     return advance_pc(dc);
3965 }
3966 
3967 TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
3968 
3969 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3970                    void (*func)(TCGv, TCGv, TCGv))
3971 {
3972     TCGv dst = gen_dest_gpr(dc, a->rd);
3973     TCGv src1 = gen_load_gpr(dc, a->rs1);
3974     TCGv src2 = gen_load_gpr(dc, a->rs2);
3975 
3976     func(dst, src1, src2);
3977     gen_store_gpr(dc, a->rd, dst);
3978     return advance_pc(dc);
3979 }
3980 
3981 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3982 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3983 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3984 
3985 TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
3986 TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)
3987 
3988 TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
3989 TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)
3990 
3991 TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
3992 
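     /*
      * ALIGNADDRESS: dst = (s1 + s2) & ~7, with the discarded low
      * three bits of the sum deposited into GSR.align for a later
      * FALIGNDATA.  The "little" variant below stores the negated
      * offset instead.
      */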
3993 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3994 {
3995 #ifdef TARGET_SPARC64
3996     TCGv tmp = tcg_temp_new();
3997 
3998     tcg_gen_add_tl(tmp, s1, s2);
3999     tcg_gen_andi_tl(dst, tmp, -8);
4000     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4001 #else
4002     g_assert_not_reached();
4003 #endif
4004 }
4005 
4006 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
4007 {
4008 #ifdef TARGET_SPARC64
4009     TCGv tmp = tcg_temp_new();
4010 
4011     tcg_gen_add_tl(tmp, s1, s2);
4012     tcg_gen_andi_tl(dst, tmp, -8);
4013     tcg_gen_neg_tl(tmp, tmp);
4014     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
4015 #else
4016     g_assert_not_reached();
4017 #endif
4018 }
4019 
4020 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
4021 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4022 
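     /*
      * BMASK: dst = s1 + s2, with the result also copied into
      * GSR.mask (bits 63:32) to control a subsequent BSHUFFLE.
      */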
4023 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
4024 {
4025 #ifdef TARGET_SPARC64
4026     tcg_gen_add_tl(dst, s1, s2);
4027     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
4028 #else
4029     g_assert_not_reached();
4030 #endif
4031 }
4032 
4033 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4034 
4035 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4036 {
4037     func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4038     return true;
4039 }
4040 
4041 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4042 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4043 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4044 
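     /*
      * Register-count shifts.  The count is masked to 5 bits (6 for
      * the 64-bit forms); for the 32-bit forms the operand or result
      * is zero- or sign-extended so that on sparc64 the full register
      * always holds the canonical 32-bit value.
      */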
4045 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
4046 {
4047     TCGv dst, src1, src2;
4048 
4049     /* Reject 64-bit shifts for sparc32. */
4050     if (avail_32(dc) && a->x) {
4051         return false;
4052     }
4053 
4054     src2 = tcg_temp_new();
4055     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
4056     src1 = gen_load_gpr(dc, a->rs1);
4057     dst = gen_dest_gpr(dc, a->rd);
4058 
4059     if (l) {
4060         tcg_gen_shl_tl(dst, src1, src2);
4061         if (!a->x) {
4062             tcg_gen_ext32u_tl(dst, dst);
4063         }
4064     } else if (u) {
4065         if (!a->x) {
4066             tcg_gen_ext32u_tl(dst, src1);
4067             src1 = dst;
4068         }
4069         tcg_gen_shr_tl(dst, src1, src2);
4070     } else {
4071         if (!a->x) {
4072             tcg_gen_ext32s_tl(dst, src1);
4073             src1 = dst;
4074         }
4075         tcg_gen_sar_tl(dst, src1, src2);
4076     }
4077     gen_store_gpr(dc, a->rd, dst);
4078     return advance_pc(dc);
4079 }
4080 
4081 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
4082 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
4083 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4084 
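     /*
      * Immediate-count shifts.  On sparc64 the 32-bit forms fold the
      * shift and the extension into one TCG op: deposit_z for SLL,
      * extract for SRL and sextract for SRA, each operating on the
      * low 32 - i bits.
      */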
4085 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
4086 {
4087     TCGv dst, src1;
4088 
4089     /* Reject 64-bit shifts for sparc32. */
4090     if (avail_32(dc) && (a->x || a->i >= 32)) {
4091         return false;
4092     }
4093 
4094     src1 = gen_load_gpr(dc, a->rs1);
4095     dst = gen_dest_gpr(dc, a->rd);
4096 
4097     if (avail_32(dc) || a->x) {
4098         if (l) {
4099             tcg_gen_shli_tl(dst, src1, a->i);
4100         } else if (u) {
4101             tcg_gen_shri_tl(dst, src1, a->i);
4102         } else {
4103             tcg_gen_sari_tl(dst, src1, a->i);
4104         }
4105     } else {
4106         if (l) {
4107             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
4108         } else if (u) {
4109             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
4110         } else {
4111             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
4112         }
4113     }
4114     gen_store_gpr(dc, a->rd, dst);
4115     return advance_pc(dc);
4116 }
4117 
4118 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
4119 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
4120 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4121 
4122 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4123 {
4124     /* For simplicity, we under-decoded the rs2 form. */
4125     if (!imm && rs2_or_imm & ~0x1f) {
4126         return NULL;
4127     }
4128     if (imm || rs2_or_imm == 0) {
4129         return tcg_constant_tl(rs2_or_imm);
4130     } else {
4131         return cpu_regs[rs2_or_imm];
4132     }
4133 }
4134 
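     /*
      * Conditional moves load the current value of rd first, so the
      * false arm of the movcond simply writes the old value back.
      */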
4135 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
4136 {
4137     TCGv dst = gen_load_gpr(dc, rd);
4138     TCGv c2 = tcg_constant_tl(cmp->c2);
4139 
4140     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
4141     gen_store_gpr(dc, rd, dst);
4142     return advance_pc(dc);
4143 }
4144 
4145 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4146 {
4147     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4148     DisasCompare cmp;
4149 
4150     if (src2 == NULL) {
4151         return false;
4152     }
4153     gen_compare(&cmp, a->cc, a->cond, dc);
4154     return do_mov_cond(dc, &cmp, a->rd, src2);
4155 }
4156 
4157 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4158 {
4159     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4160     DisasCompare cmp;
4161 
4162     if (src2 == NULL) {
4163         return false;
4164     }
4165     gen_fcompare(&cmp, a->cc, a->cond);
4166     return do_mov_cond(dc, &cmp, a->rd, src2);
4167 }
4168 
4169 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4170 {
4171     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4172     DisasCompare cmp;
4173 
4174     if (src2 == NULL) {
4175         return false;
4176     }
4177     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4178         return false;
4179     }
4180     return do_mov_cond(dc, &cmp, a->rd, src2);
4181 }
4182 
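     /*
      * Shared worker for SAVE, RESTORE, JMPL, RETT and RETURN:
      * compute rs1 + (simm13 or rs2) and pass the sum to the
      * per-instruction callback.
      */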
4183 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
4184                            bool (*func)(DisasContext *dc, int rd, TCGv src))
4185 {
4186     TCGv src1, sum;
4187 
4188     /* For simplicity, we under-decoded the rs2 form. */
4189     if (!a->imm && a->rs2_or_imm & ~0x1f) {
4190         return false;
4191     }
4192 
4193     /*
4194      * Always load the sum into a new temporary.  This is required to
4195      * capture the value across a window change (e.g. SAVE and RESTORE);
4196      * the copy might otherwise be optimized away.
4197      */
4198     sum = tcg_temp_new();
4199     src1 = gen_load_gpr(dc, a->rs1);
4200     if (a->imm || a->rs2_or_imm == 0) {
4201         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
4202     } else {
4203         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
4204     }
4205     return func(dc, a->rd, sum);
4206 }
4207 
4208 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
4209 {
4210     /*
4211      * Preserve pc across advance, so that we can delay
4212      * the writeback to rd until after src is consumed.
4213      */
4214     target_ulong cur_pc = dc->pc;
4215 
4216     gen_check_align(dc, src, 3);
4217 
4218     gen_mov_pc_npc(dc);
4219     tcg_gen_mov_tl(cpu_npc, src);
4220     gen_address_mask(dc, cpu_npc);
4221     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
4222 
4223     dc->npc = DYNAMIC_PC_LOOKUP;
4224     return true;
4225 }
4226 
4227 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4228 
4229 static bool do_rett(DisasContext *dc, int rd, TCGv src)
4230 {
4231     if (!supervisor(dc)) {
4232         return raise_priv(dc);
4233     }
4234 
4235     gen_check_align(dc, src, 3);
4236 
4237     gen_mov_pc_npc(dc);
4238     tcg_gen_mov_tl(cpu_npc, src);
4239     gen_helper_rett(tcg_env);
4240 
4241     dc->npc = DYNAMIC_PC;
4242     return true;
4243 }
4244 
4245 TRANS(RETT, 32, do_add_special, a, do_rett)
4246 
4247 static bool do_return(DisasContext *dc, int rd, TCGv src)
4248 {
4249     gen_check_align(dc, src, 3);
4250     gen_helper_restore(tcg_env);
4251 
4252     gen_mov_pc_npc(dc);
4253     tcg_gen_mov_tl(cpu_npc, src);
4254     gen_address_mask(dc, cpu_npc);
4255 
4256     dc->npc = DYNAMIC_PC_LOOKUP;
4257     return true;
4258 }
4259 
4260 TRANS(RETURN, 64, do_add_special, a, do_return)
4261 
4262 static bool do_save(DisasContext *dc, int rd, TCGv src)
4263 {
4264     gen_helper_save(tcg_env);
4265     gen_store_gpr(dc, rd, src);
4266     return advance_pc(dc);
4267 }
4268 
4269 TRANS(SAVE, ALL, do_add_special, a, do_save)
4270 
4271 static bool do_restore(DisasContext *dc, int rd, TCGv src)
4272 {
4273     gen_helper_restore(tcg_env);
4274     gen_store_gpr(dc, rd, src);
4275     return advance_pc(dc);
4276 }
4277 
4278 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4279 
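     /*
      * DONE and RETRY reload pc/npc from the current trap state, so
      * both are marked dynamic before calling the helper.
      */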
4280 static bool do_done_retry(DisasContext *dc, bool done)
4281 {
4282     if (!supervisor(dc)) {
4283         return raise_priv(dc);
4284     }
4285     dc->npc = DYNAMIC_PC;
4286     dc->pc = DYNAMIC_PC;
4287     translator_io_start(&dc->base);
4288     if (done) {
4289         gen_helper_done(tcg_env);
4290     } else {
4291         gen_helper_retry(tcg_env);
4292     }
4293     return true;
4294 }
4295 
4296 TRANS(DONE, 64, do_done_retry, true)
4297 TRANS(RETRY, 64, do_done_retry, false)
4298 
4299 /*
4300  * Major opcode 11 -- load and store instructions
4301  */
4302 
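     /*
      * Form a load/store effective address: rs1 plus either simm13 or
      * rs2.  The result is truncated to 32 bits when address masking
      * applies (AM_CHECK), and NULL is returned for the under-decoded
      * rs2 encodings so the caller can reject them.
      */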
4303 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
4304 {
4305     TCGv addr, tmp = NULL;
4306 
4307     /* For simplicity, we under-decoded the rs2 form. */
4308     if (!imm && rs2_or_imm & ~0x1f) {
4309         return NULL;
4310     }
4311 
4312     addr = gen_load_gpr(dc, rs1);
4313     if (rs2_or_imm) {
4314         tmp = tcg_temp_new();
4315         if (imm) {
4316             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
4317         } else {
4318             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
4319         }
4320         addr = tmp;
4321     }
4322     if (AM_CHECK(dc)) {
4323         if (!tmp) {
4324             tmp = tcg_temp_new();
4325         }
4326         tcg_gen_ext32u_tl(tmp, addr);
4327         addr = tmp;
4328     }
4329     return addr;
4330 }
4331 
4332 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4333 {
4334     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4335     DisasASI da;
4336 
4337     if (addr == NULL) {
4338         return false;
4339     }
4340     da = resolve_asi(dc, a->asi, mop);
4341 
4342     reg = gen_dest_gpr(dc, a->rd);
4343     gen_ld_asi(dc, &da, reg, addr);
4344     gen_store_gpr(dc, a->rd, reg);
4345     return advance_pc(dc);
4346 }
4347 
4348 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
4349 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
4350 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
4351 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
4352 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
4353 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
4354 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4355 
4356 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4357 {
4358     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4359     DisasASI da;
4360 
4361     if (addr == NULL) {
4362         return false;
4363     }
4364     da = resolve_asi(dc, a->asi, mop);
4365 
4366     reg = gen_load_gpr(dc, a->rd);
4367     gen_st_asi(dc, &da, reg, addr);
4368     return advance_pc(dc);
4369 }
4370 
4371 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4372 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4373 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4374 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4375 
4376 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4377 {
4378     TCGv addr;
4379     DisasASI da;
4380 
4381     if (a->rd & 1) {
4382         return false;
4383     }
4384     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4385     if (addr == NULL) {
4386         return false;
4387     }
4388     da = resolve_asi(dc, a->asi, MO_TEUQ);
4389     gen_ldda_asi(dc, &da, addr, a->rd);
4390     return advance_pc(dc);
4391 }
4392 
4393 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4394 {
4395     TCGv addr;
4396     DisasASI da;
4397 
4398     if (a->rd & 1) {
4399         return false;
4400     }
4401     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4402     if (addr == NULL) {
4403         return false;
4404     }
4405     da = resolve_asi(dc, a->asi, MO_TEUQ);
4406     gen_stda_asi(dc, &da, addr, a->rd);
4407     return advance_pc(dc);
4408 }
4409 
4410 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4411 {
4412     TCGv addr, reg;
4413     DisasASI da;
4414 
4415     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4416     if (addr == NULL) {
4417         return false;
4418     }
4419     da = resolve_asi(dc, a->asi, MO_UB);
4420 
4421     reg = gen_dest_gpr(dc, a->rd);
4422     gen_ldstub_asi(dc, &da, reg, addr);
4423     gen_store_gpr(dc, a->rd, reg);
4424     return advance_pc(dc);
4425 }
4426 
4427 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4428 {
4429     TCGv addr, dst, src;
4430     DisasASI da;
4431 
4432     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4433     if (addr == NULL) {
4434         return false;
4435     }
4436     da = resolve_asi(dc, a->asi, MO_TEUL);
4437 
4438     dst = gen_dest_gpr(dc, a->rd);
4439     src = gen_load_gpr(dc, a->rd);
4440     gen_swap_asi(dc, &da, dst, src, addr);
4441     gen_store_gpr(dc, a->rd, dst);
4442     return advance_pc(dc);
4443 }
4444 
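     /*
      * Compare-and-swap, roughly: if ([addr] == rs2) swap rd and
      * [addr], else rd = [addr].  Here o receives the old memory
      * value, n is the new value taken from rd, and c is the
      * comparand from rs2; the address comes from rs1 alone.
      */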
4445 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4446 {
4447     TCGv addr, o, n, c;
4448     DisasASI da;
4449 
4450     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4451     if (addr == NULL) {
4452         return false;
4453     }
4454     da = resolve_asi(dc, a->asi, mop);
4455 
4456     o = gen_dest_gpr(dc, a->rd);
4457     n = gen_load_gpr(dc, a->rd);
4458     c = gen_load_gpr(dc, a->rs2_or_imm);
4459     gen_cas_asi(dc, &da, o, n, c, addr);
4460     gen_store_gpr(dc, a->rd, o);
4461     return advance_pc(dc);
4462 }
4463 
4464 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4465 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4466 
4467 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4468 {
4469     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4470     DisasASI da;
4471 
4472     if (addr == NULL) {
4473         return false;
4474     }
4475     if (gen_trap_ifnofpu(dc)) {
4476         return true;
4477     }
4478     if (sz == MO_128 && gen_trap_float128(dc)) {
4479         return true;
4480     }
4481     da = resolve_asi(dc, a->asi, MO_TE | sz);
4482     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4483     gen_update_fprs_dirty(dc, a->rd);
4484     return advance_pc(dc);
4485 }
4486 
4487 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4488 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4489 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4490 
4491 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4492 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4493 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4494 
4495 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4496 {
4497     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4498     DisasASI da;
4499 
4500     if (addr == NULL) {
4501         return false;
4502     }
4503     if (gen_trap_ifnofpu(dc)) {
4504         return true;
4505     }
4506     if (sz == MO_128 && gen_trap_float128(dc)) {
4507         return true;
4508     }
4509     da = resolve_asi(dc, a->asi, MO_TE | sz);
4510     gen_stf_asi(dc, &da, sz, addr, a->rd);
4511     return advance_pc(dc);
4512 }
4513 
4514 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4515 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4516 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4517 
4518 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4519 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4520 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4521 
4522 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4523 {
4524     if (!avail_32(dc)) {
4525         return false;
4526     }
4527     if (!supervisor(dc)) {
4528         return raise_priv(dc);
4529     }
4530     if (gen_trap_ifnofpu(dc)) {
4531         return true;
4532     }
4533     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4534     return true;
4535 }
4536 
4537 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4538 {
4539     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4540     TCGv_i32 tmp;
4541 
4542     if (addr == NULL) {
4543         return false;
4544     }
4545     if (gen_trap_ifnofpu(dc)) {
4546         return true;
4547     }
4548 
4549     tmp = tcg_temp_new_i32();
4550     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4551 
4552     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4553     /* LDFSR does not change FCC[1-3]. */
4554 
4555     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4556     return advance_pc(dc);
4557 }
4558 
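     /*
      * LDXFSR loads all four fcc fields: fcc0 sits in the low word
      * (bits 11:10), while fcc1..fcc3 live at bits 33:32, 35:34 and
      * 37:36, hence the extracts from the high half with the shifts
      * adjusted by -32.  cpu_fcc[3] doubles as the scratch register
      * for the high half.
      */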
4559 static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
4560 {
4561 #ifdef TARGET_SPARC64
4562     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4563     TCGv_i64 t64;
4564     TCGv_i32 lo, hi;
4565 
4566     if (addr == NULL) {
4567         return false;
4568     }
4569     if (gen_trap_ifnofpu(dc)) {
4570         return true;
4571     }
4572 
4573     t64 = tcg_temp_new_i64();
4574     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4575 
4576     lo = tcg_temp_new_i32();
4577     hi = cpu_fcc[3];
4578     tcg_gen_extr_i64_i32(lo, hi, t64);
4579     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4580     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4581     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4582     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4583 
4584     if (entire) {
4585         gen_helper_set_fsr_nofcc(tcg_env, lo);
4586     } else {
4587         gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4588     }
4589     return advance_pc(dc);
4590 #else
4591     return false;
4592 #endif
4593 }
4594 
4595 TRANS(LDXFSR, 64, do_ldxfsr, a, false)
4596 TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4597 
4598 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4599 {
4600     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4601     TCGv fsr;
4602 
4603     if (addr == NULL) {
4604         return false;
4605     }
4606     if (gen_trap_ifnofpu(dc)) {
4607         return true;
4608     }
4609 
4610     fsr = tcg_temp_new();
4611     gen_helper_get_fsr(fsr, tcg_env);
4612     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4613     return advance_pc(dc);
4614 }
4615 
4616 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4617 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4618 
4619 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4620 {
4621     if (gen_trap_ifnofpu(dc)) {
4622         return true;
4623     }
4624     gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4625     return advance_pc(dc);
4626 }
4627 
4628 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4629 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4630 
4631 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4632 {
4633     if (gen_trap_ifnofpu(dc)) {
4634         return true;
4635     }
4636     gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
4637     return advance_pc(dc);
4638 }
4639 
4640 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4641 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4642 
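     /*
      * The workers below follow a naming scheme: each letter gives an
      * operand class in destination-first order (f = float32,
      * d = float64, q = float128, r = integer), and an env_ infix
      * means the callback also receives tcg_env, for helpers that can
      * raise FP exceptions or consult FSR state.
      */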
4643 static bool do_ff(DisasContext *dc, arg_r_r *a,
4644                   void (*func)(TCGv_i32, TCGv_i32))
4645 {
4646     TCGv_i32 tmp;
4647 
4648     if (gen_trap_ifnofpu(dc)) {
4649         return true;
4650     }
4651 
4652     tmp = gen_load_fpr_F(dc, a->rs);
4653     func(tmp, tmp);
4654     gen_store_fpr_F(dc, a->rd, tmp);
4655     return advance_pc(dc);
4656 }
4657 
4658 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4659 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4660 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4661 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4662 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4663 
4664 static bool do_fd(DisasContext *dc, arg_r_r *a,
4665                   void (*func)(TCGv_i32, TCGv_i64))
4666 {
4667     TCGv_i32 dst;
4668     TCGv_i64 src;
4669 
4670     if (gen_trap_ifnofpu(dc)) {
4671         return true;
4672     }
4673 
4674     dst = tcg_temp_new_i32();
4675     src = gen_load_fpr_D(dc, a->rs);
4676     func(dst, src);
4677     gen_store_fpr_F(dc, a->rd, dst);
4678     return advance_pc(dc);
4679 }
4680 
4681 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4682 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4683 
4684 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4685                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4686 {
4687     TCGv_i32 tmp;
4688 
4689     if (gen_trap_ifnofpu(dc)) {
4690         return true;
4691     }
4692 
4693     tmp = gen_load_fpr_F(dc, a->rs);
4694     func(tmp, tcg_env, tmp);
4695     gen_store_fpr_F(dc, a->rd, tmp);
4696     return advance_pc(dc);
4697 }
4698 
4699 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4700 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4701 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4702 
4703 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4704                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4705 {
4706     TCGv_i32 dst;
4707     TCGv_i64 src;
4708 
4709     if (gen_trap_ifnofpu(dc)) {
4710         return true;
4711     }
4712 
4713     dst = tcg_temp_new_i32();
4714     src = gen_load_fpr_D(dc, a->rs);
4715     func(dst, tcg_env, src);
4716     gen_store_fpr_F(dc, a->rd, dst);
4717     return advance_pc(dc);
4718 }
4719 
4720 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4721 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4722 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4723 
4724 static bool do_dd(DisasContext *dc, arg_r_r *a,
4725                   void (*func)(TCGv_i64, TCGv_i64))
4726 {
4727     TCGv_i64 dst, src;
4728 
4729     if (gen_trap_ifnofpu(dc)) {
4730         return true;
4731     }
4732 
4733     dst = tcg_temp_new_i64();
4734     src = gen_load_fpr_D(dc, a->rs);
4735     func(dst, src);
4736     gen_store_fpr_D(dc, a->rd, dst);
4737     return advance_pc(dc);
4738 }
4739 
4740 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4741 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4742 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4743 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4744 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4745 
4746 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4747                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4748 {
4749     TCGv_i64 dst, src;
4750 
4751     if (gen_trap_ifnofpu(dc)) {
4752         return true;
4753     }
4754 
4755     dst = tcg_temp_new_i64();
4756     src = gen_load_fpr_D(dc, a->rs);
4757     func(dst, tcg_env, src);
4758     gen_store_fpr_D(dc, a->rd, dst);
4759     return advance_pc(dc);
4760 }
4761 
4762 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4763 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4764 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4765 
4766 static bool do_df(DisasContext *dc, arg_r_r *a,
4767                   void (*func)(TCGv_i64, TCGv_i32))
4768 {
4769     TCGv_i64 dst;
4770     TCGv_i32 src;
4771 
4772     if (gen_trap_ifnofpu(dc)) {
4773         return true;
4774     }
4775 
4776     dst = tcg_temp_new_i64();
4777     src = gen_load_fpr_F(dc, a->rs);
4778     func(dst, src);
4779     gen_store_fpr_D(dc, a->rd, dst);
4780     return advance_pc(dc);
4781 }
4782 
4783 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4784 
4785 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4786                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4787 {
4788     TCGv_i64 dst;
4789     TCGv_i32 src;
4790 
4791     if (gen_trap_ifnofpu(dc)) {
4792         return true;
4793     }
4794 
4795     dst = tcg_temp_new_i64();
4796     src = gen_load_fpr_F(dc, a->rs);
4797     func(dst, tcg_env, src);
4798     gen_store_fpr_D(dc, a->rd, dst);
4799     return advance_pc(dc);
4800 }
4801 
4802 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4803 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4804 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4805 
4806 static bool do_qq(DisasContext *dc, arg_r_r *a,
4807                   void (*func)(TCGv_i128, TCGv_i128))
4808 {
4809     TCGv_i128 t;
4810 
4811     if (gen_trap_ifnofpu(dc)) {
4812         return true;
4813     }
4814     if (gen_trap_float128(dc)) {
4815         return true;
4816     }
4817 
4818     gen_op_clear_ieee_excp_and_FTT();
4819     t = gen_load_fpr_Q(dc, a->rs);
4820     func(t, t);
4821     gen_store_fpr_Q(dc, a->rd, t);
4822     return advance_pc(dc);
4823 }
4824 
4825 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4826 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4827 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4828 
4829 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4830                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4831 {
4832     TCGv_i128 t;
4833 
4834     if (gen_trap_ifnofpu(dc)) {
4835         return true;
4836     }
4837     if (gen_trap_float128(dc)) {
4838         return true;
4839     }
4840 
4841     t = gen_load_fpr_Q(dc, a->rs);
4842     func(t, tcg_env, t);
4843     gen_store_fpr_Q(dc, a->rd, t);
4844     return advance_pc(dc);
4845 }
4846 
4847 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4848 
4849 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4850                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4851 {
4852     TCGv_i128 src;
4853     TCGv_i32 dst;
4854 
4855     if (gen_trap_ifnofpu(dc)) {
4856         return true;
4857     }
4858     if (gen_trap_float128(dc)) {
4859         return true;
4860     }
4861 
4862     src = gen_load_fpr_Q(dc, a->rs);
4863     dst = tcg_temp_new_i32();
4864     func(dst, tcg_env, src);
4865     gen_store_fpr_F(dc, a->rd, dst);
4866     return advance_pc(dc);
4867 }
4868 
4869 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4870 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4871 
4872 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4873                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4874 {
4875     TCGv_i128 src;
4876     TCGv_i64 dst;
4877 
4878     if (gen_trap_ifnofpu(dc)) {
4879         return true;
4880     }
4881     if (gen_trap_float128(dc)) {
4882         return true;
4883     }
4884 
4885     src = gen_load_fpr_Q(dc, a->rs);
4886     dst = tcg_temp_new_i64();
4887     func(dst, tcg_env, src);
4888     gen_store_fpr_D(dc, a->rd, dst);
4889     return advance_pc(dc);
4890 }
4891 
4892 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4893 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4894 
4895 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4896                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4897 {
4898     TCGv_i32 src;
4899     TCGv_i128 dst;
4900 
4901     if (gen_trap_ifnofpu(dc)) {
4902         return true;
4903     }
4904     if (gen_trap_float128(dc)) {
4905         return true;
4906     }
4907 
4908     src = gen_load_fpr_F(dc, a->rs);
4909     dst = tcg_temp_new_i128();
4910     func(dst, tcg_env, src);
4911     gen_store_fpr_Q(dc, a->rd, dst);
4912     return advance_pc(dc);
4913 }
4914 
4915 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4916 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4917 
4918 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4919                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4920 {
4921     TCGv_i64 src;
4922     TCGv_i128 dst;
4923 
4924     if (gen_trap_ifnofpu(dc)) {
4925         return true;
4926     }
4927     if (gen_trap_float128(dc)) {
4928         return true;
4929     }
4930 
4931     src = gen_load_fpr_D(dc, a->rs);
4932     dst = tcg_temp_new_i128();
4933     func(dst, tcg_env, src);
4934     gen_store_fpr_Q(dc, a->rd, dst);
4935     return advance_pc(dc);
4936 }
4937 
4938 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4939 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4940 
4941 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4942                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4943 {
4944     TCGv_i32 src1, src2;
4945 
4946     if (gen_trap_ifnofpu(dc)) {
4947         return true;
4948     }
4949 
4950     src1 = gen_load_fpr_F(dc, a->rs1);
4951     src2 = gen_load_fpr_F(dc, a->rs2);
4952     func(src1, src1, src2);
4953     gen_store_fpr_F(dc, a->rd, src1);
4954     return advance_pc(dc);
4955 }
4956 
4957 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4958 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4959 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4960 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4961 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4962 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4963 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4964 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4965 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4966 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4967 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4968 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4969 
4970 TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
4971 TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
4972 TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)
4973 
4974 TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
4975 TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
4976 TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
4977 TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4978 
4979 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4980                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4981 {
4982     TCGv_i32 src1, src2;
4983 
4984     if (gen_trap_ifnofpu(dc)) {
4985         return true;
4986     }
4987 
4988     src1 = gen_load_fpr_F(dc, a->rs1);
4989     src2 = gen_load_fpr_F(dc, a->rs2);
4990     func(src1, tcg_env, src1, src2);
4991     gen_store_fpr_F(dc, a->rd, src1);
4992     return advance_pc(dc);
4993 }
4994 
4995 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4996 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4997 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4998 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4999 TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
5000 TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5001 
5002 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
5003                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
5004 {
5005     TCGv_i64 dst;
5006     TCGv_i32 src1, src2;
5007 
5008     if (gen_trap_ifnofpu(dc)) {
5009         return true;
5010     }
5011 
5012     dst = tcg_temp_new_i64();
5013     src1 = gen_load_fpr_F(dc, a->rs1);
5014     src2 = gen_load_fpr_F(dc, a->rs2);
5015     func(dst, src1, src2);
5016     gen_store_fpr_D(dc, a->rd, dst);
5017     return advance_pc(dc);
5018 }
5019 
5020 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
5021 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
5022 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
5023 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
5024 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5025 
5026 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
5027                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
5028 {
5029     TCGv_i64 dst, src2;
5030     TCGv_i32 src1;
5031 
5032     if (gen_trap_ifnofpu(dc)) {
5033         return true;
5034     }
5035 
5036     dst = tcg_temp_new_i64();
5037     src1 = gen_load_fpr_F(dc, a->rs1);
5038     src2 = gen_load_fpr_D(dc, a->rs2);
5039     func(dst, src1, src2);
5040     gen_store_fpr_D(dc, a->rd, dst);
5041     return advance_pc(dc);
5042 }
5043 
5044 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5045 
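     /*
      * Map a 64-bit VIS partitioned op onto the generic TCG vector
      * expander: oprsz = maxsz = 8 restricts the operation to a
      * single double-precision register, treated as a vector of
      * 8/16/32-bit lanes according to vece.
      */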
5046 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5047                         void (*func)(unsigned, uint32_t, uint32_t,
5048                                      uint32_t, uint32_t, uint32_t))
5049 {
5050     if (gen_trap_ifnofpu(dc)) {
5051         return true;
5052     }
5053 
5054     func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5055          gen_offset_fpr_D(a->rs2), 8, 8);
5056     return advance_pc(dc);
5057 }
5058 
5059 TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
5060 TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
5061 TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
5062 
5063 TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
5064 TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
5065 TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
5066 
5067 TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
5068 TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)
5069 
5070 TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
5071 TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
5072 TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
5073 TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
5074 TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)
5075 
5076 TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
5077 TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
5078 TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
5079 TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
5080 TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)
5081 
5082 TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
5083 TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
5084 TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
5085 TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
5086 TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
5087 TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)
5088 
5089 TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
5090 TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
5091 TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
5092 TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
5093 TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
5094 TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)
5095 
5096 TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
5097 TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
5098 TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
5099 TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
5100 TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
5101 TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5102 
5103 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5104                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5105 {
5106     TCGv_i64 dst, src1, src2;
5107 
5108     if (gen_trap_ifnofpu(dc)) {
5109         return true;
5110     }
5111 
5112     dst = tcg_temp_new_i64();
5113     src1 = gen_load_fpr_D(dc, a->rs1);
5114     src2 = gen_load_fpr_D(dc, a->rs2);
5115     func(dst, src1, src2);
5116     gen_store_fpr_D(dc, a->rd, dst);
5117     return advance_pc(dc);
5118 }
5119 
5120 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
5121 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
5122 
5123 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
5124 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
5125 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
5126 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
5127 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
5128 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
5129 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
5130 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
5131 
5132 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
5133 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
5134 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
5135 
5136 TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
5137 TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
5138 TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)
5139 
5140 TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
5141 TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
5142 TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
5143 TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5144 
5145 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5146                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
5147 {
5148     TCGv_i64 src1, src2;
5149     TCGv dst;
5150 
5151     if (gen_trap_ifnofpu(dc)) {
5152         return true;
5153     }
5154 
5155     dst = gen_dest_gpr(dc, a->rd);
5156     src1 = gen_load_fpr_D(dc, a->rs1);
5157     src2 = gen_load_fpr_D(dc, a->rs2);
5158     func(dst, src1, src2);
5159     gen_store_gpr(dc, a->rd, dst);
5160     return advance_pc(dc);
5161 }
5162 
5163 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
5164 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
5165 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
5166 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
5167 TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
5168 TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)
5169 
5170 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
5171 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
5172 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
5173 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
5174 TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
5175 TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)
5176 
5177 TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
5178 TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
5179 TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
5180 TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
5181 TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
5182 TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)
5183 
5184 TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
5185 TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
5186 TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5187 
5188 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5189                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5190 {
5191     TCGv_i64 dst, src1, src2;
5192 
5193     if (gen_trap_ifnofpu(dc)) {
5194         return true;
5195     }
5196 
5197     dst = tcg_temp_new_i64();
5198     src1 = gen_load_fpr_D(dc, a->rs1);
5199     src2 = gen_load_fpr_D(dc, a->rs2);
5200     func(dst, tcg_env, src1, src2);
5201     gen_store_fpr_D(dc, a->rd, dst);
5202     return advance_pc(dc);
5203 }
5204 
5205 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5206 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5207 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5208 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5209 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5210 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5211 
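     /*
      * FsMULd multiplies two single-precision sources into a
      * double-precision result.  On cpus without CPU_FEATURE_FSMULD it
      * is treated as an unimplemented FPop rather than an illegal insn.
      */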
5212 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5213 {
5214     TCGv_i64 dst;
5215     TCGv_i32 src1, src2;
5216 
5217     if (gen_trap_ifnofpu(dc)) {
5218         return true;
5219     }
5220     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5221         return raise_unimpfpop(dc);
5222     }
5223 
5224     dst = tcg_temp_new_i64();
5225     src1 = gen_load_fpr_F(dc, a->rs1);
5226     src2 = gen_load_fpr_F(dc, a->rs2);
5227     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5228     gen_store_fpr_D(dc, a->rd, dst);
5229     return advance_pc(dc);
5230 }
5231 
5232 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5233 {
5234     TCGv_i64 dst;
5235     TCGv_i32 src1, src2;
5236 
5237     if (!avail_VIS3(dc)) {
5238         return false;
5239     }
5240     if (gen_trap_ifnofpu(dc)) {
5241         return true;
5242     }
5243     dst = tcg_temp_new_i64();
5244     src1 = gen_load_fpr_F(dc, a->rs1);
5245     src2 = gen_load_fpr_F(dc, a->rs2);
5246     gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5247     gen_store_fpr_D(dc, a->rd, dst);
5248     return advance_pc(dc);
5249 }
5250 
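     /* Handle 3-source single-precision ops: the FMAf fused multiply-adds. */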
5251 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5252                     void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5253 {
5254     TCGv_i32 dst, src1, src2, src3;
5255 
5256     if (gen_trap_ifnofpu(dc)) {
5257         return true;
5258     }
5259 
5260     src1 = gen_load_fpr_F(dc, a->rs1);
5261     src2 = gen_load_fpr_F(dc, a->rs2);
5262     src3 = gen_load_fpr_F(dc, a->rs3);
5263     dst = tcg_temp_new_i32();
5264     func(dst, src1, src2, src3);
5265     gen_store_fpr_F(dc, a->rd, dst);
5266     return advance_pc(dc);
5267 }
5268 
5269 TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
5270 TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
5271 TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
5272 TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5273 
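     /*
      * Handle 3-source 64-bit ops: PDIST, the FMAf fused multiply-adds,
      * and the IMA integer multiply-adds.
      */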
5274 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5275                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5276 {
5277     TCGv_i64 dst, src1, src2, src3;
5278 
5279     if (gen_trap_ifnofpu(dc)) {
5280         return true;
5281     }
5282 
5283     dst  = tcg_temp_new_i64();
5284     src1 = gen_load_fpr_D(dc, a->rs1);
5285     src2 = gen_load_fpr_D(dc, a->rs2);
5286     src3 = gen_load_fpr_D(dc, a->rs3);
5287     func(dst, src1, src2, src3);
5288     gen_store_fpr_D(dc, a->rd, dst);
5289     return advance_pc(dc);
5290 }
5291 
5292 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5293 TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
5294 TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
5295 TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
5296 TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
5297 TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
5298 TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5299 
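     /*
      * The VIS4 form of FALIGNDATA takes the alignment amount from the
      * integer register rs1 and reuses rd as the first data operand.
      */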
5300 static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
5301 {
5302     TCGv_i64 dst, src1, src2;
5303     TCGv src3;
5304 
5305     if (!avail_VIS4(dc)) {
5306         return false;
5307     }
5308     if (gen_trap_ifnofpu(dc)) {
5309         return true;
5310     }
5311 
5312     dst  = tcg_temp_new_i64();
5313     src1 = gen_load_fpr_D(dc, a->rd);
5314     src2 = gen_load_fpr_D(dc, a->rs2);
5315     src3 = gen_load_gpr(dc, a->rs1);
5316     gen_op_faligndata_i(dst, src1, src2, src3);
5317     gen_store_fpr_D(dc, a->rd, dst);
5318     return advance_pc(dc);
5319 }
5320 
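     /*
      * Handle 128-bit fp ops taking the cpu env pointer.  The helper
      * result overwrites src1 before being stored back to rd.
      */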
5321 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5322                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5323 {
5324     TCGv_i128 src1, src2;
5325 
5326     if (gen_trap_ifnofpu(dc)) {
5327         return true;
5328     }
5329     if (gen_trap_float128(dc)) {
5330         return true;
5331     }
5332 
5333     src1 = gen_load_fpr_Q(dc, a->rs1);
5334     src2 = gen_load_fpr_Q(dc, a->rs2);
5335     func(src1, tcg_env, src1, src2);
5336     gen_store_fpr_Q(dc, a->rd, src1);
5337     return advance_pc(dc);
5338 }
5339 
5340 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
5341 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
5342 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
5343 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5344 
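     /* FdMULq multiplies two double-precision sources into a quad result. */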
5345 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5346 {
5347     TCGv_i64 src1, src2;
5348     TCGv_i128 dst;
5349 
5350     if (gen_trap_ifnofpu(dc)) {
5351         return true;
5352     }
5353     if (gen_trap_float128(dc)) {
5354         return true;
5355     }
5356 
5357     src1 = gen_load_fpr_D(dc, a->rs1);
5358     src2 = gen_load_fpr_D(dc, a->rs2);
5359     dst = tcg_temp_new_i128();
5360     gen_helper_fdmulq(dst, tcg_env, src1, src2);
5361     gen_store_fpr_Q(dc, a->rd, dst);
5362     return advance_pc(dc);
5363 }
5364 
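     /*
      * FMOVR: conditionally move an fp register based on a test of an
      * integer register; reserved condition encodings are rejected as
      * illegal by gen_compare_reg.
      */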
5365 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5366                      void (*func)(DisasContext *, DisasCompare *, int, int))
5367 {
5368     DisasCompare cmp;
5369 
5370     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
5371         return false;
5372     }
5373     if (gen_trap_ifnofpu(dc)) {
5374         return true;
5375     }
5376     if (is_128 && gen_trap_float128(dc)) {
5377         return true;
5378     }
5379 
5380     gen_op_clear_ieee_excp_and_FTT();
5381     func(dc, &cmp, a->rd, a->rs2);
5382     return advance_pc(dc);
5383 }
5384 
5385 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
5386 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
5387 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5388 
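     /* FMOVcc: conditionally move an fp register on the integer condition codes. */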
5389 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5390                       void (*func)(DisasContext *, DisasCompare *, int, int))
5391 {
5392     DisasCompare cmp;
5393 
5394     if (gen_trap_ifnofpu(dc)) {
5395         return true;
5396     }
5397     if (is_128 && gen_trap_float128(dc)) {
5398         return true;
5399     }
5400 
5401     gen_op_clear_ieee_excp_and_FTT();
5402     gen_compare(&cmp, a->cc, a->cond, dc);
5403     func(dc, &cmp, a->rd, a->rs2);
5404     return advance_pc(dc);
5405 }
5406 
5407 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
5408 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
5409 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5410 
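     /* FMOVfcc: conditionally move an fp register on an fp condition code. */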
5411 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5412                        void (*func)(DisasContext *, DisasCompare *, int, int))
5413 {
5414     DisasCompare cmp;
5415 
5416     if (gen_trap_ifnofpu(dc)) {
5417         return true;
5418     }
5419     if (is_128 && gen_trap_float128(dc)) {
5420         return true;
5421     }
5422 
5423     gen_op_clear_ieee_excp_and_FTT();
5424     gen_fcompare(&cmp, a->cc, a->cond);
5425     func(dc, &cmp, a->rd, a->rs2);
5426     return advance_pc(dc);
5427 }
5428 
5429 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
5430 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
5431 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5432 
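     /*
      * Compare ops; the FCMPE forms additionally raise an exception for
      * unordered operands.  Pre-v9 cpus have only %fcc0, so a nonzero
      * cc field is illegal there.  The double and quad variants below
      * follow the same pattern.
      */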
5433 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5434 {
5435     TCGv_i32 src1, src2;
5436 
5437     if (avail_32(dc) && a->cc != 0) {
5438         return false;
5439     }
5440     if (gen_trap_ifnofpu(dc)) {
5441         return true;
5442     }
5443 
5444     src1 = gen_load_fpr_F(dc, a->rs1);
5445     src2 = gen_load_fpr_F(dc, a->rs2);
5446     if (e) {
5447         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
5448     } else {
5449         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5450     }
5451     return advance_pc(dc);
5452 }
5453 
5454 TRANS(FCMPs, ALL, do_fcmps, a, false)
5455 TRANS(FCMPEs, ALL, do_fcmps, a, true)
5456 
5457 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5458 {
5459     TCGv_i64 src1, src2;
5460 
5461     if (avail_32(dc) && a->cc != 0) {
5462         return false;
5463     }
5464     if (gen_trap_ifnofpu(dc)) {
5465         return true;
5466     }
5467 
5468     src1 = gen_load_fpr_D(dc, a->rs1);
5469     src2 = gen_load_fpr_D(dc, a->rs2);
5470     if (e) {
5471         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5472     } else {
5473         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5474     }
5475     return advance_pc(dc);
5476 }
5477 
5478 TRANS(FCMPd, ALL, do_fcmpd, a, false)
5479 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5480 
5481 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5482 {
5483     TCGv_i128 src1, src2;
5484 
5485     if (avail_32(dc) && a->cc != 0) {
5486         return false;
5487     }
5488     if (gen_trap_ifnofpu(dc)) {
5489         return true;
5490     }
5491     if (gen_trap_float128(dc)) {
5492         return true;
5493     }
5494 
5495     src1 = gen_load_fpr_Q(dc, a->rs1);
5496     src2 = gen_load_fpr_Q(dc, a->rs2);
5497     if (e) {
5498         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5499     } else {
5500         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5501     }
5502     return advance_pc(dc);
5503 }
5504 
5505 TRANS(FCMPq, ALL, do_fcmpq, a, false)
5506 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5507 
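     /*
      * FLCMP (VIS3) performs a lexicographic fp compare; the helpers
      * take no env pointer since no IEEE exception state is involved.
      */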
5508 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5509 {
5510     TCGv_i32 src1, src2;
5511 
5512     if (!avail_VIS3(dc)) {
5513         return false;
5514     }
5515     if (gen_trap_ifnofpu(dc)) {
5516         return true;
5517     }
5518 
5519     src1 = gen_load_fpr_F(dc, a->rs1);
5520     src2 = gen_load_fpr_F(dc, a->rs2);
5521     gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5522     return advance_pc(dc);
5523 }
5524 
5525 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5526 {
5527     TCGv_i64 src1, src2;
5528 
5529     if (!avail_VIS3(dc)) {
5530         return false;
5531     }
5532     if (gen_trap_ifnofpu(dc)) {
5533         return true;
5534     }
5535 
5536     src1 = gen_load_fpr_D(dc, a->rs1);
5537     src2 = gen_load_fpr_D(dc, a->rs2);
5538     gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5539     return advance_pc(dc);
5540 }
5541 
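     /*
      * Move a value from an fp register to an integer register by
      * loading directly from the fp register's slot in the cpu env.
      */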
5542 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5543                       int (*offset)(unsigned int),
5544                       void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5545 {
5546     TCGv dst;
5547 
5548     if (gen_trap_ifnofpu(dc)) {
5549         return true;
5550     }
5551     dst = gen_dest_gpr(dc, a->rd);
5552     load(dst, tcg_env, offset(a->rs));
5553     gen_store_gpr(dc, a->rd, dst);
5554     return advance_pc(dc);
5555 }
5556 
5557 TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
5558 TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
5559 TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5560 
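     /* The converse: store an integer register into an fp register's env slot. */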
5561 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5562                       int (*offset)(unsigned int),
5563                       void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5564 {
5565     TCGv src;
5566 
5567     if (gen_trap_ifnofpu(dc)) {
5568         return true;
5569     }
5570     src = gen_load_gpr(dc, a->rs);
5571     store(src, tcg_env, offset(a->rd));
5572     return advance_pc(dc);
5573 }
5574 
5575 TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
5576 TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5577 
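     /*
      * Translator hooks: per-TB setup, per-insn bookkeeping and decode,
      * and TB finalization, invoked through sparc_tr_ops below.
      */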
5578 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5579 {
5580     DisasContext *dc = container_of(dcbase, DisasContext, base);
5581     int bound;
5582 
5583     dc->pc = dc->base.pc_first;
5584     dc->npc = (target_ulong)dc->base.tb->cs_base;
5585     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5586     dc->def = &cpu_env(cs)->def;
5587     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5588     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5589 #ifndef CONFIG_USER_ONLY
5590     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5591 #endif
5592 #ifdef TARGET_SPARC64
5593     dc->fprs_dirty = 0;
5594     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5595 #ifndef CONFIG_USER_ONLY
5596     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5597 #endif
5598 #endif
5599     /*
5600      * if we reach a page boundary, we stop generation so that the
5601      * PC of a TT_TFAULT exception is always in the right page
5602      */
5603     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5604     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5605 }
5606 
5607 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5608 {
5609 }
5610 
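     /*
      * Log pc/npc for this insn.  An npc with its low bits set is one
      * of the special markers; canonicalize it so restore_state_to_opc
      * sees either DYNAMIC_PC or a JUMP_PC-tagged branch target.
      */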
5611 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5612 {
5613     DisasContext *dc = container_of(dcbase, DisasContext, base);
5614     target_ulong npc = dc->npc;
5615 
5616     if (npc & 3) {
5617         switch (npc) {
5618         case JUMP_PC:
5619             assert(dc->jump_pc[1] == dc->pc + 4);
5620             npc = dc->jump_pc[0] | JUMP_PC;
5621             break;
5622         case DYNAMIC_PC:
5623         case DYNAMIC_PC_LOOKUP:
5624             npc = DYNAMIC_PC;
5625             break;
5626         default:
5627             g_assert_not_reached();
5628         }
5629     }
5630     tcg_gen_insn_start(dc->pc, npc);
5631 }
5632 
5633 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5634 {
5635     DisasContext *dc = container_of(dcbase, DisasContext, base);
5636     unsigned int insn;
5637 
5638     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5639     dc->base.pc_next += 4;
5640 
5641     if (!decode(dc, insn)) {
5642         gen_exception(dc, TT_ILL_INSN);
5643     }
5644 
5645     if (dc->base.is_jmp == DISAS_NORETURN) {
5646         return;
5647     }
5648     if (dc->pc != dc->base.pc_next) {
5649         dc->base.is_jmp = DISAS_TOO_MANY;
5650     }
5651 }
5652 
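     /*
      * Finish the TB: chain directly when both pc and npc are static,
      * otherwise flush them to the cpu state and either look up the
      * next TB or exit to the main loop, depending on how dynamic the
      * two values are.
      */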
5653 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5654 {
5655     DisasContext *dc = container_of(dcbase, DisasContext, base);
5656     DisasDelayException *e, *e_next;
5657     bool may_lookup;
5658 
5659     finishing_insn(dc);
5660 
5661     switch (dc->base.is_jmp) {
5662     case DISAS_NEXT:
5663     case DISAS_TOO_MANY:
5664         if (((dc->pc | dc->npc) & 3) == 0) {
5665             /* static PC and NPC: we can use direct chaining */
5666             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5667             break;
5668         }
5669 
5670         may_lookup = true;
5671         if (dc->pc & 3) {
5672             switch (dc->pc) {
5673             case DYNAMIC_PC_LOOKUP:
5674                 break;
5675             case DYNAMIC_PC:
5676                 may_lookup = false;
5677                 break;
5678             default:
5679                 g_assert_not_reached();
5680             }
5681         } else {
5682             tcg_gen_movi_tl(cpu_pc, dc->pc);
5683         }
5684 
5685         if (dc->npc & 3) {
5686             switch (dc->npc) {
5687             case JUMP_PC:
5688                 gen_generic_branch(dc);
5689                 break;
5690             case DYNAMIC_PC:
5691                 may_lookup = false;
5692                 break;
5693             case DYNAMIC_PC_LOOKUP:
5694                 break;
5695             default:
5696                 g_assert_not_reached();
5697             }
5698         } else {
5699             tcg_gen_movi_tl(cpu_npc, dc->npc);
5700         }
5701         if (may_lookup) {
5702             tcg_gen_lookup_and_goto_ptr();
5703         } else {
5704             tcg_gen_exit_tb(NULL, 0);
5705         }
5706         break;
5707 
5708     case DISAS_NORETURN:
5709         break;
5710 
5711     case DISAS_EXIT:
5712         /* Exit TB */
5713         save_state(dc);
5714         tcg_gen_exit_tb(NULL, 0);
5715         break;
5716 
5717     default:
5718         g_assert_not_reached();
5719     }
5720 
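         /*
          * Emit the out-of-line paths for exceptions raised in delay
          * slots: restore pc (and npc, unless it is a dynamic marker)
          * and raise the queued exception.
          */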
5721     for (e = dc->delay_excp_list; e ; e = e_next) {
5722         gen_set_label(e->lab);
5723 
5724         tcg_gen_movi_tl(cpu_pc, e->pc);
5725         if (e->npc % 4 == 0) {
5726             tcg_gen_movi_tl(cpu_npc, e->npc);
5727         }
5728         gen_helper_raise_exception(tcg_env, e->excp);
5729 
5730         e_next = e->next;
5731         g_free(e);
5732     }
5733 }
5734 
5735 static const TranslatorOps sparc_tr_ops = {
5736     .init_disas_context = sparc_tr_init_disas_context,
5737     .tb_start           = sparc_tr_tb_start,
5738     .insn_start         = sparc_tr_insn_start,
5739     .translate_insn     = sparc_tr_translate_insn,
5740     .tb_stop            = sparc_tr_tb_stop,
5741 };
5742 
5743 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5744                            vaddr pc, void *host_pc)
5745 {
5746     DisasContext dc = {};
5747 
5748     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5749 }
5750 
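     /*
      * Register the TCG globals backing the cpu state: %g1-%g7 live in
      * env.gregs, while the windowed registers are reached indirectly
      * through regwptr.  cpu_regs[0] stays NULL, as %g0 always reads
      * as zero.
      */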
5751 void sparc_tcg_init(void)
5752 {
5753     static const char gregnames[32][4] = {
5754         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5755         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5756         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5757         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5758     };
5759 
5760     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5761 #ifdef TARGET_SPARC64
5762         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5763         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5764         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5765         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5766         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5767 #else
5768         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5769 #endif
5770     };
5771 
5772     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5773 #ifdef TARGET_SPARC64
5774         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5775         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5776         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5777 #endif
5778         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5779         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5780         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5781         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5782         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5783         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5784         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5785         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5786         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5787     };
5788 
5789     unsigned int i;
5790 
5791     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5792                                          offsetof(CPUSPARCState, regwptr),
5793                                          "regwptr");
5794 
5795     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5796         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5797     }
5798 
5799     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5800         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5801     }
5802 
5803     cpu_regs[0] = NULL;
5804     for (i = 1; i < 8; ++i) {
5805         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5806                                          offsetof(CPUSPARCState, gregs[i]),
5807                                          gregnames[i]);
5808     }
5809 
5810     for (i = 8; i < 32; ++i) {
5811         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5812                                          (i - 8) * sizeof(target_ulong),
5813                                          gregnames[i]);
5814     }
5815 }
5816 
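     /*
      * Invert the insn_start encoding: a JUMP_PC-tagged npc selects
      * between the recorded branch target and the fall-through address
      * using env->cond.
      */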
5817 void sparc_restore_state_to_opc(CPUState *cs,
5818                                 const TranslationBlock *tb,
5819                                 const uint64_t *data)
5820 {
5821     CPUSPARCState *env = cpu_env(cs);
5822     target_ulong pc = data[0];
5823     target_ulong npc = data[1];
5824 
5825     env->pc = pc;
5826     if (npc == DYNAMIC_PC) {
5827         /* dynamic NPC: already stored */
5828     } else if (npc & JUMP_PC) {
5829         /* jump PC: use 'cond' and the jump targets of the translation */
5830         if (env->cond) {
5831             env->npc = npc & ~3;
5832         } else {
5833             env->npc = pc + 4;
5834         }
5835     } else {
5836         env->npc = npc;
5837     }
5838 }
5839