xref: /openbmc/qemu/target/sparc/translate.c (revision 21c06f57)
/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

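/*
 * A single translate.c is built for both sparc32 and sparc64; helpers
 * that exist only for the other target are stubbed out below with
 * qemu_build_not_reached(), which fails the build if the call turns
 * out to be reachable.  Callers are expected to guard such calls with
 * a condition that folds to a compile-time constant, e.g. a sketch:
 *
 *     if (TARGET_LONG_BITS == 64) {
 *         gen_helper_rdccr(dst, tcg_env);   // branch elided on sparc32
 *     }
 */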
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif

/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3
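/*
 * SPARC instructions are 4-byte aligned, so a real pc/npc always has
 * the low two bits clear.  The constants above can therefore be stored
 * in dc->pc/dc->npc without colliding with a real address; tests such
 * as "dc->npc & 3" below recognize the symbolic values.
 */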

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;
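/*
 * A DisasCompare describes the test "cond(c1, c2)", where c1 is a
 * value computed at translate time and c2 a small immediate, almost
 * always 0.  For example, gen_compare() below encodes "lt" as
 * cond == TCG_COND_LT, c1 == N ^ V, c2 == 0.
 */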

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
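/*
 * Worked example: GET_FIELD numbers bits from the MSB down, while
 * GET_FIELD_SP uses the manuals' numbering.  For the 13-bit simm13
 * field in bits 12..0,
 *
 *     GET_FIELD(insn, 19, 31) == GET_FIELD_SP(insn, 0, 12)
 *                             == insn & 0x1fff
 *
 * and the GET_FIELDs/GET_FIELD_SPs variants sign-extend the result.
 */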

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif
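/*
 * Example: on sparc64, bit 0 of an encoded double/quad register number
 * selects the upper half of the register file, so an encoded rd of 1
 * maps to %f32: DFPREG(1) == ((1 & 1) << 5) | (1 & 0x1e) == 32.  On
 * sparc32 the extra bits are simply masked off.
 */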

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
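/*
 * The single-precision registers are packed two per element of the
 * 64-bit cpu_fpr[] array, with the even register in the high half.
 * Loading %fN thus extracts the high (N even) or low (N odd) 32 bits
 * of cpu_fpr[N / 2], and storing deposits into the same half.
 */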
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    src = DFPREG(src);
    return cpu_fpr[src / 2];
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();

    src = QFPREG(src);
    tcg_gen_concat_i64_i128(ret, cpu_fpr[src / 2 + 1], cpu_fpr[src / 2]);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    dst = QFPREG(dst);
    tcg_gen_extr_i128_i64(cpu_fpr[dst / 2 + 1], cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
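/*
 * With PSTATE.AM set, a sparc64 CPU truncates addresses to 32 bits.
 * AM_CHECK folds to a constant wherever the answer is known at build
 * time (always true for 32-bit user emulation via TARGET_ABI32, never
 * true on sparc32), so the masking below disappears entirely in those
 * configurations.
 */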

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
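/*
 * When both pc and npc are compile-time constants and
 * translator_use_goto_tb() agrees, the exit is a direct jump that the
 * TB-chaining machinery can patch in place; otherwise we fall back to
 * a hash lookup of the next TB, which still avoids bouncing through
 * the main execution loop in the common case.
 */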

static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
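/*
 * On a 64-bit target the 32-bit carry lives in bit 32 of cpu_icc_C
 * (src1 ^ src2 ^ result, whose bit 32 is the carry out of the low
 * word), hence the extract; on sparc32, cpu_icc_C already holds 0 or 1
 * from the double-word add/subtract.
 */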

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
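/*
 * Sanity check of the V computation above: signed overflow is
 * (res ^ src1) & (res ^ src2) < 0.  On bits where src1 == src2 the two
 * factors are equal, and where they differ the product is 0, so this
 * equals (res ^ src2) & ~(src1 ^ src2) -- exactly N ^ src2 masked by
 * ~Z as computed above.
 */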

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
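/*
 * Example of the tag trick above: if either operand has a nonzero tag
 * (low two bits), then t & 3 is in 1..3 and -(t & 3) zero-extended to
 * 32 bits has bit 31 set, so the final OR forces icc.V, which is how
 * tagged add reports an overflow condition.
 */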

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
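/*
 * Note on the borrow: after the double-word subtract, tcg_gen_sub2_tl
 * leaves 0 or -1 (all ones) in the high word, and the negation above
 * turns that into the 0/1 carry flag that SPARC expects.
 */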

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
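/*
 * Concrete example: with GSR.align == 3 the shift is 24, so
 * dst = (s1 << 24) | (s2 >> 40), i.e. the 8-byte window starting 3
 * bytes into the 16-byte s1:s2 pair.  The xori with 63 computes
 * 63 - shift (valid because shift <= 56), giving s2 >> (64 - shift)
 * without ever issuing an undefined 64-bit shift.
 */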

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}
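/*
 * Delayed exceptions: the conditional branch to e->lab is emitted
 * inline on the faulting path, while the code at the label, which
 * raises e->excp with the saved pc/npc, is generated later from
 * delay_excp_list when the TB is finalized.  This keeps the common,
 * non-faulting path straight-line.
 */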

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
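/*
 * Recap of the condition encoding: bits 2:0 select one of the eight
 * base tests above and bit 3 negates it, so e.g. "bne" (cond 0x9) is
 * the inversion of "be" (cond 0x1), and "ba" (cond 0x8) of "bn"
 * (cond 0x0).
 */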

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
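/*
 * The fcc tricks above, spelled out: "fbul" tests fcc & 1 != 0 because
 * {1, 3} are exactly the fcc values with bit 0 set, while "fblg" uses
 * the classic unsigned range check fcc - 1 <= 1 to test fcc in {1, 2}
 * with a single comparison.
 */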

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}

/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to pass through the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}

#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}

static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}

static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should raise DAE_invalid_asi.
1600            But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
1601         if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1602             gen_helper_exit_atomic(tcg_env);
1603         } else {
1604             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1605             TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1606             TCGv_i64 s64, t64;
1607 
1608             save_state(dc);
1609             t64 = tcg_temp_new_i64();
1610             gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1611 
1612             s64 = tcg_constant_i64(0xff);
1613             gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1614 
1615             tcg_gen_trunc_i64_tl(dst, t64);
1616 
1617             /* End the TB.  */
1618             dc->npc = DYNAMIC_PC;
1619         }
1620         break;
1621     }
1622 }
1623 
1624 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1625                         TCGv addr, int rd)
1626 {
1627     MemOp memop = da->memop;
1628     MemOp size = memop & MO_SIZE;
1629     TCGv_i32 d32;
1630     TCGv_i64 d64;
1631     TCGv addr_tmp;
1632 
1633     /* TODO: Use 128-bit load/store below. */
1634     if (size == MO_128) {
1635         memop = (memop & ~MO_SIZE) | MO_64;
1636     }
1637 
1638     switch (da->type) {
1639     case GET_ASI_EXCP:
1640         break;
1641 
1642     case GET_ASI_DIRECT:
1643         memop |= MO_ALIGN_4;
1644         switch (size) {
1645         case MO_32:
1646             d32 = tcg_temp_new_i32();
1647             tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
1648             gen_store_fpr_F(dc, rd, d32);
1649             break;
1650 
1651         case MO_64:
1652             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
1653             break;
1654 
1655         case MO_128:
1656             d64 = tcg_temp_new_i64();
1657             tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
1658             addr_tmp = tcg_temp_new();
1659             tcg_gen_addi_tl(addr_tmp, addr, 8);
1660             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1661             tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1662             break;
1663         default:
1664             g_assert_not_reached();
1665         }
1666         break;
1667 
1668     case GET_ASI_BLOCK:
1669         /* Valid for lddfa on aligned registers only.  */
1670         if (orig_size == MO_64 && (rd & 7) == 0) {
1671             /* The first operation checks required alignment.  */
1672             addr_tmp = tcg_temp_new();
1673             for (int i = 0; ; ++i) {
1674                 tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1675                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1676                 if (i == 7) {
1677                     break;
1678                 }
1679                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1680                 addr = addr_tmp;
1681             }
1682         } else {
1683             gen_exception(dc, TT_ILL_INSN);
1684         }
1685         break;
1686 
1687     case GET_ASI_SHORT:
1688         /* Valid for lddfa only.  */
1689         if (orig_size == MO_64) {
1690             tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1691                                 memop | MO_ALIGN);
1692         } else {
1693             gen_exception(dc, TT_ILL_INSN);
1694         }
1695         break;
1696 
1697     default:
1698         {
1699             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1700             TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
1701 
1702             save_state(dc);
1703             /* According to the table in the UA2011 manual, the only
1704                other asis that are valid for ldfa/lddfa/ldqfa are
1705                the NO_FAULT asis.  We still need a helper for these,
1706                but we can just use the integer asi helper for them.  */
1707             switch (size) {
1708             case MO_32:
1709                 d64 = tcg_temp_new_i64();
1710                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1711                 d32 = tcg_temp_new_i32();
1712                 tcg_gen_extrl_i64_i32(d32, d64);
1713                 gen_store_fpr_F(dc, rd, d32);
1714                 break;
1715             case MO_64:
1716                 gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
1717                                   r_asi, r_mop);
1718                 break;
1719             case MO_128:
1720                 d64 = tcg_temp_new_i64();
1721                 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
1722                 addr_tmp = tcg_temp_new();
1723                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1724                 gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
1725                                   r_asi, r_mop);
1726                 tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
1727                 break;
1728             default:
1729                 g_assert_not_reached();
1730             }
1731         }
1732         break;
1733     }
1734 }
1735 
1736 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1737                         TCGv addr, int rd)
1738 {
1739     MemOp memop = da->memop;
1740     MemOp size = memop & MO_SIZE;
1741     TCGv_i32 d32;
1742     TCGv addr_tmp;
1743 
1744     /* TODO: Use a 128-bit store below. */
1745     if (size == MO_128) {
1746         memop = (memop & ~MO_SIZE) | MO_64;
1747     }
1748 
1749     switch (da->type) {
1750     case GET_ASI_EXCP:
1751         break;
1752 
1753     case GET_ASI_DIRECT:
1754         memop |= MO_ALIGN_4;
1755         switch (size) {
1756         case MO_32:
1757             d32 = gen_load_fpr_F(dc, rd);
1758             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
1759             break;
1760         case MO_64:
1761             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1762                                 memop | MO_ALIGN_4);
1763             break;
1764         case MO_128:
1765             /* Only 4-byte alignment required.  However, it is legal for the
1766                cpu to signal the alignment fault, and the OS trap handler is
1767                required to fix it up.  Requiring 16-byte alignment here avoids
1768                having to probe the second page before performing the first
1769                write.  */
1770             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1771                                 memop | MO_ALIGN_16);
1772             addr_tmp = tcg_temp_new();
1773             tcg_gen_addi_tl(addr_tmp, addr, 8);
1774             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
1775             break;
1776         default:
1777             g_assert_not_reached();
1778         }
1779         break;
1780 
1781     case GET_ASI_BLOCK:
1782         /* Valid for stdfa on aligned registers only.  */
1783         if (orig_size == MO_64 && (rd & 7) == 0) {
1784             /* The first operation checks required alignment.  */
1785             addr_tmp = tcg_temp_new();
1786             for (int i = 0; ; ++i) {
1787                 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
1788                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
1789                 if (i == 7) {
1790                     break;
1791                 }
1792                 tcg_gen_addi_tl(addr_tmp, addr, 8);
1793                 addr = addr_tmp;
1794             }
1795         } else {
1796             gen_exception(dc, TT_ILL_INSN);
1797         }
1798         break;
1799 
1800     case GET_ASI_SHORT:
1801         /* Valid for stdfa only.  */
1802         if (orig_size == MO_64) {
1803             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
1804                                 memop | MO_ALIGN);
1805         } else {
1806             gen_exception(dc, TT_ILL_INSN);
1807         }
1808         break;
1809 
1810     default:
1811         /* According to the table in the UA2011 manual, the only
1812            other ASIs that are valid for stfa/stdfa/stqfa are
1813            the PST* ASIs, which aren't currently handled.  */
1814         gen_exception(dc, TT_ILL_INSN);
1815         break;
1816     }
1817 }
1818 
1819 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
1820 {
1821     TCGv hi = gen_dest_gpr(dc, rd);
1822     TCGv lo = gen_dest_gpr(dc, rd + 1);
1823 
1824     switch (da->type) {
1825     case GET_ASI_EXCP:
1826         return;
1827 
1828     case GET_ASI_DTWINX:
1829 #ifdef TARGET_SPARC64
1830         {
1831             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
1832             TCGv_i128 t = tcg_temp_new_i128();
1833 
1834             tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
1835             /*
1836              * Note that LE twinx acts as if each 64-bit register result is
1837              * byte swapped.  We perform one 128-bit LE load, so must swap
1838              * the order of the writebacks.
1839              */
1840             if ((mop & MO_BSWAP) == MO_TE) {
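            /*
             * Concretely: with the big-endian (target-endian) ASI the
             * quadword at ADDR lands in the high half of T, while with the
             * little-endian ASI it lands in the low half; swapping the
             * destinations keeps %r[rd] = [ADDR] and %r[rd+1] = [ADDR+8]
             * in both cases.
             */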
1841                 tcg_gen_extr_i128_i64(lo, hi, t);
1842             } else {
1843                 tcg_gen_extr_i128_i64(hi, lo, t);
1844             }
1845         }
1846         break;
1847 #else
1848         g_assert_not_reached();
1849 #endif
1850 
1851     case GET_ASI_DIRECT:
1852         {
1853             TCGv_i64 tmp = tcg_temp_new_i64();
1854 
1855             tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
1856 
1857             /* Note that LE ldda acts as if each 32-bit register
1858                result is byte swapped.  Having just performed one
1859                64-bit bswap, we now need to swap the writebacks.  */
1860             if ((da->memop & MO_BSWAP) == MO_TE) {
1861                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1862             } else {
1863                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1864             }
1865         }
1866         break;
1867 
1868     case GET_ASI_CODE:
1869 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1870         {
1871             MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1872             TCGv_i64 tmp = tcg_temp_new_i64();
1873 
1874             gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
1875 
1876             /* See above.  */
1877             if ((da->memop & MO_BSWAP) == MO_TE) {
1878                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1879             } else {
1880                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1881             }
1882         }
1883         break;
1884 #else
1885         g_assert_not_reached();
1886 #endif
1887 
1888     default:
1889         /* ??? In theory we've handled all of the ASIs that are valid
1890            for ldda, and this should raise DAE_invalid_asi.  However,
1891            real hardware allows others.  This can be seen with e.g.
1892            FreeBSD 10.3 wrt ASI_IC_TAG.  */
1893         {
1894             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1895             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
1896             TCGv_i64 tmp = tcg_temp_new_i64();
1897 
1898             save_state(dc);
1899             gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
1900 
1901             /* See above.  */
1902             if ((da->memop & MO_BSWAP) == MO_TE) {
1903                 tcg_gen_extr_i64_tl(lo, hi, tmp);
1904             } else {
1905                 tcg_gen_extr_i64_tl(hi, lo, tmp);
1906             }
1907         }
1908         break;
1909     }
1910 
1911     gen_store_gpr(dc, rd, hi);
1912     gen_store_gpr(dc, rd + 1, lo);
1913 }
1914 
1915 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
1916 {
1917     TCGv hi = gen_load_gpr(dc, rd);
1918     TCGv lo = gen_load_gpr(dc, rd + 1);
1919 
1920     switch (da->type) {
1921     case GET_ASI_EXCP:
1922         break;
1923 
1924     case GET_ASI_DTWINX:
1925 #ifdef TARGET_SPARC64
1926         {
1927             MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
1928             TCGv_i128 t = tcg_temp_new_i128();
1929 
1930             /*
1931              * Note that LE twinx acts as if each 64-bit register result is
1932              * byte swapped.  We perform one 128-bit LE store, so must swap
1933              * the order of the construction.
1934              */
1935             if ((mop & MO_BSWAP) == MO_TE) {
1936                 tcg_gen_concat_i64_i128(t, lo, hi);
1937             } else {
1938                 tcg_gen_concat_i64_i128(t, hi, lo);
1939             }
1940             tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
1941         }
1942         break;
1943 #else
1944         g_assert_not_reached();
1945 #endif
1946 
1947     case GET_ASI_DIRECT:
1948         {
1949             TCGv_i64 t64 = tcg_temp_new_i64();
1950 
1951             /* Note that LE stda acts as if each 32-bit register result is
1952                byte swapped.  We will perform one 64-bit LE store, so now
1953                we must swap the order of the construction.  */
1954             if ((da->memop & MO_BSWAP) == MO_TE) {
1955                 tcg_gen_concat_tl_i64(t64, lo, hi);
1956             } else {
1957                 tcg_gen_concat_tl_i64(t64, hi, lo);
1958             }
1959             tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
1960         }
1961         break;
1962 
1963     case GET_ASI_BFILL:
1964         assert(TARGET_LONG_BITS == 32);
1965         /*
1966          * Store 32 bytes to ADDR, replicating the 8-byte [rd:rd+1] pair.
1967          * See comments for GET_ASI_COPY above.
1968          */
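        /*
         * For example, addr = 0x1015 fills 0x1000..0x101f: the andi
         * with -32 clears the low five address bits, and the replicated
         * pair is written back as two 16-byte stores.
         */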
1969         {
1970             MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
1971             TCGv_i64 t8 = tcg_temp_new_i64();
1972             TCGv_i128 t16 = tcg_temp_new_i128();
1973             TCGv daddr = tcg_temp_new();
1974 
1975             tcg_gen_concat_tl_i64(t8, lo, hi);
1976             tcg_gen_concat_i64_i128(t16, t8, t8);
1977             tcg_gen_andi_tl(daddr, addr, -32);
1978             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
1979             tcg_gen_addi_tl(daddr, daddr, 16);
1980             tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
1981         }
1982         break;
1983 
1984     default:
1985         /* ??? In theory we've handled all of the ASIs that are valid
1986            for stda, and this should raise DAE_invalid_asi.  */
1987         {
1988             TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1989             TCGv_i32 r_mop = tcg_constant_i32(da->memop);
1990             TCGv_i64 t64 = tcg_temp_new_i64();
1991 
1992             /* See above.  */
1993             if ((da->memop & MO_BSWAP) == MO_TE) {
1994                 tcg_gen_concat_tl_i64(t64, lo, hi);
1995             } else {
1996                 tcg_gen_concat_tl_i64(t64, hi, lo);
1997             }
1998 
1999             save_state(dc);
2000             gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2001         }
2002         break;
2003     }
2004 }
2005 
2006 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2007 {
2008 #ifdef TARGET_SPARC64
2009     TCGv_i32 c32, zero, dst, s1, s2;
2010     TCGv_i64 c64 = tcg_temp_new_i64();
2011 
2012     /* We have two choices here: extend the 32-bit data and use movcond_i64,
2013        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2014        the latter.  */
2015     c32 = tcg_temp_new_i32();
2016     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2017     tcg_gen_extrl_i64_i32(c32, c64);
2018 
2019     s1 = gen_load_fpr_F(dc, rs);
2020     s2 = gen_load_fpr_F(dc, rd);
2021     dst = tcg_temp_new_i32();
2022     zero = tcg_constant_i32(0);
2023 
2024     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2025 
2026     gen_store_fpr_F(dc, rd, dst);
2027 #else
2028     qemu_build_not_reached();
2029 #endif
2030 }
2031 
2032 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2033 {
2034 #ifdef TARGET_SPARC64
2035     TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2036     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2037                         gen_load_fpr_D(dc, rs),
2038                         gen_load_fpr_D(dc, rd));
2039     gen_store_fpr_D(dc, rd, dst);
2040 #else
2041     qemu_build_not_reached();
2042 #endif
2043 }
2044 
2045 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2046 {
2047 #ifdef TARGET_SPARC64
2048     int qd = QFPREG(rd);
2049     int qs = QFPREG(rs);
2050     TCGv c2 = tcg_constant_tl(cmp->c2);
2051 
2052     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2053                         cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2054     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2055                         cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2056 
2057     gen_update_fprs_dirty(dc, qd);
2058 #else
2059     qemu_build_not_reached();
2060 #endif
2061 }
2062 
2063 #ifdef TARGET_SPARC64
2064 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2065 {
2066     TCGv_i32 r_tl = tcg_temp_new_i32();
2067 
2068     /* load env->tl into r_tl */
2069     tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2070 
2071     /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be one less than a power of 2 */
2072     tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2073 
2074     /* calculate offset to current trap state from env->ts, reuse r_tl */
2075     tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2076     tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2077 
2078     /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2079     {
2080         TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2081         tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2082         tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2083     }
2084 }
2085 #endif
2086 
2087 static int extract_dfpreg(DisasContext *dc, int x)
2088 {
2089     return DFPREG(x);
2090 }
2091 
2092 static int extract_qfpreg(DisasContext *dc, int x)
2093 {
2094     return QFPREG(x);
2095 }
2096 
2097 /* Include the auto-generated decoder.  */
2098 #include "decode-insns.c.inc"
2099 
2100 #define TRANS(NAME, AVAIL, FUNC, ...) \
2101     static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2102     { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
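
/*
 * For example, TRANS(Bicc, ALL, do_bpcc, a) below expands to roughly
 *
 *     static bool trans_Bicc(DisasContext *dc, arg_Bicc *a)
 *     { return avail_ALL(dc) && do_bpcc(dc, a); }
 *
 * so the availability predicate is evaluated before the bound
 * translation function runs.
 */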
2103 
2104 #define avail_ALL(C)      true
2105 #ifdef TARGET_SPARC64
2106 # define avail_32(C)      false
2107 # define avail_ASR17(C)   false
2108 # define avail_CASA(C)    true
2109 # define avail_DIV(C)     true
2110 # define avail_MUL(C)     true
2111 # define avail_POWERDOWN(C) false
2112 # define avail_64(C)      true
2113 # define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
2114 # define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
2115 # define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
2116 # define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
2117 #else
2118 # define avail_32(C)      true
2119 # define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
2120 # define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
2121 # define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
2122 # define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
2123 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2124 # define avail_64(C)      false
2125 # define avail_GL(C)      false
2126 # define avail_HYPV(C)    false
2127 # define avail_VIS1(C)    false
2128 # define avail_VIS2(C)    false
2129 #endif
2130 
2131 /* Default case for non-jump instructions. */
2132 static bool advance_pc(DisasContext *dc)
2133 {
2134     TCGLabel *l1;
2135 
2136     finishing_insn(dc);
2137 
2138     if (dc->npc & 3) {
2139         switch (dc->npc) {
2140         case DYNAMIC_PC:
2141         case DYNAMIC_PC_LOOKUP:
2142             dc->pc = dc->npc;
2143             tcg_gen_mov_tl(cpu_pc, cpu_npc);
2144             tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2145             break;
2146 
2147         case JUMP_PC:
2148             /* we can do a static jump */
2149             l1 = gen_new_label();
2150             tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2151 
2152             /* jump not taken */
2153             gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2154 
2155             /* jump taken */
2156             gen_set_label(l1);
2157             gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2158 
2159             dc->base.is_jmp = DISAS_NORETURN;
2160             break;
2161 
2162         default:
2163             g_assert_not_reached();
2164         }
2165     } else {
2166         dc->pc = dc->npc;
2167         dc->npc = dc->npc + 4;
2168     }
2169     return true;
2170 }
2171 
2172 /*
2173  * Major opcodes 00 and 01 -- branches, call, and sethi
2174  */
2175 
2176 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2177                               bool annul, int disp)
2178 {
2179     target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2180     target_ulong npc;
2181 
2182     finishing_insn(dc);
2183 
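    /*
     * A reminder of the SPARC annul rules encoded below: with the annul
     * bit clear the delay slot always executes; with it set, a
     * conditional branch annuls the slot only when not taken, while
     * branch-always and branch-never annul it unconditionally.
     */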
2184     if (cmp->cond == TCG_COND_ALWAYS) {
2185         if (annul) {
2186             dc->pc = dest;
2187             dc->npc = dest + 4;
2188         } else {
2189             gen_mov_pc_npc(dc);
2190             dc->npc = dest;
2191         }
2192         return true;
2193     }
2194 
2195     if (cmp->cond == TCG_COND_NEVER) {
2196         npc = dc->npc;
2197         if (npc & 3) {
2198             gen_mov_pc_npc(dc);
2199             if (annul) {
2200                 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2201             }
2202             tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2203         } else {
2204             dc->pc = npc + (annul ? 4 : 0);
2205             dc->npc = dc->pc + 4;
2206         }
2207         return true;
2208     }
2209 
2210     flush_cond(dc);
2211     npc = dc->npc;
2212 
2213     if (annul) {
2214         TCGLabel *l1 = gen_new_label();
2215 
2216         tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2217         gen_goto_tb(dc, 0, npc, dest);
2218         gen_set_label(l1);
2219         gen_goto_tb(dc, 1, npc + 4, npc + 8);
2220 
2221         dc->base.is_jmp = DISAS_NORETURN;
2222     } else {
2223         if (npc & 3) {
2224             switch (npc) {
2225             case DYNAMIC_PC:
2226             case DYNAMIC_PC_LOOKUP:
2227                 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2228                 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2229                 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2230                                    cmp->c1, tcg_constant_tl(cmp->c2),
2231                                    tcg_constant_tl(dest), cpu_npc);
2232                 dc->pc = npc;
2233                 break;
2234             default:
2235                 g_assert_not_reached();
2236             }
2237         } else {
2238             dc->pc = npc;
2239             dc->npc = JUMP_PC;
2240             dc->jump = *cmp;
2241             dc->jump_pc[0] = dest;
2242             dc->jump_pc[1] = npc + 4;
2243 
2244             /* The condition for cpu_cond is always NE -- normalize. */
2245             if (cmp->cond == TCG_COND_NE) {
2246                 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2247             } else {
2248                 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2249             }
2250             dc->cpu_cond_live = true;
2251         }
2252     }
2253     return true;
2254 }
2255 
2256 static bool raise_priv(DisasContext *dc)
2257 {
2258     gen_exception(dc, TT_PRIV_INSN);
2259     return true;
2260 }
2261 
2262 static bool raise_unimpfpop(DisasContext *dc)
2263 {
2264     gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2265     return true;
2266 }
2267 
2268 static bool gen_trap_float128(DisasContext *dc)
2269 {
2270     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2271         return false;
2272     }
2273     return raise_unimpfpop(dc);
2274 }
2275 
2276 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2277 {
2278     DisasCompare cmp;
2279 
2280     gen_compare(&cmp, a->cc, a->cond, dc);
2281     return advance_jump_cond(dc, &cmp, a->a, a->i);
2282 }
2283 
2284 TRANS(Bicc, ALL, do_bpcc, a)
2285 TRANS(BPcc,  64, do_bpcc, a)
2286 
2287 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2288 {
2289     DisasCompare cmp;
2290 
2291     if (gen_trap_ifnofpu(dc)) {
2292         return true;
2293     }
2294     gen_fcompare(&cmp, a->cc, a->cond);
2295     return advance_jump_cond(dc, &cmp, a->a, a->i);
2296 }
2297 
2298 TRANS(FBPfcc,  64, do_fbpfcc, a)
2299 TRANS(FBfcc,  ALL, do_fbpfcc, a)
2300 
2301 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2302 {
2303     DisasCompare cmp;
2304 
2305     if (!avail_64(dc)) {
2306         return false;
2307     }
2308     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2309         return false;
2310     }
2311     return advance_jump_cond(dc, &cmp, a->a, a->i);
2312 }
2313 
2314 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2315 {
2316     target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2317 
2318     gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2319     gen_mov_pc_npc(dc);
2320     dc->npc = target;
2321     return true;
2322 }
2323 
2324 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2325 {
2326     /*
2327      * For sparc32, always generate the no-coprocessor exception.
2328      * For sparc64, always generate an illegal instruction exception.
2329      */
2330 #ifdef TARGET_SPARC64
2331     return false;
2332 #else
2333     gen_exception(dc, TT_NCP_INSN);
2334     return true;
2335 #endif
2336 }
2337 
2338 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2339 {
2340     /* Special-case %g0 because that's the canonical nop.  */
2341     if (a->rd) {
2342         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2343     }
2344     return advance_pc(dc);
2345 }
2346 
2347 /*
2348  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2349  */
2350 
2351 static bool do_tcc(DisasContext *dc, int cond, int cc,
2352                    int rs1, bool imm, int rs2_or_imm)
2353 {
2354     int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2355                 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2356     DisasCompare cmp;
2357     TCGLabel *lab;
2358     TCGv_i32 trap;
2359 
2360     /* Trap never.  */
2361     if (cond == 0) {
2362         return advance_pc(dc);
2363     }
2364 
2365     /*
2366      * Immediate traps are the most common case.  Since this value is
2367      * live across the branch, it really pays to evaluate the constant.
2368      */
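    /*
     * For example, "ta 0x10" (the sparc32 Linux syscall trap) arrives
     * here with rs1 == %g0 and imm set, and folds directly to
     * tcg_constant_i32((0x10 & mask) + TT_TRAP).
     */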
2369     if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2370         trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2371     } else {
2372         trap = tcg_temp_new_i32();
2373         tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2374         if (imm) {
2375             tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2376         } else {
2377             TCGv_i32 t2 = tcg_temp_new_i32();
2378             tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2379             tcg_gen_add_i32(trap, trap, t2);
2380         }
2381         tcg_gen_andi_i32(trap, trap, mask);
2382         tcg_gen_addi_i32(trap, trap, TT_TRAP);
2383     }
2384 
2385     finishing_insn(dc);
2386 
2387     /* Trap always.  */
2388     if (cond == 8) {
2389         save_state(dc);
2390         gen_helper_raise_exception(tcg_env, trap);
2391         dc->base.is_jmp = DISAS_NORETURN;
2392         return true;
2393     }
2394 
2395     /* Conditional trap.  */
2396     flush_cond(dc);
2397     lab = delay_exceptionv(dc, trap);
2398     gen_compare(&cmp, cc, cond, dc);
2399     tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2400 
2401     return advance_pc(dc);
2402 }
2403 
2404 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2405 {
2406     if (avail_32(dc) && a->cc) {
2407         return false;
2408     }
2409     return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2410 }
2411 
2412 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2413 {
2414     if (avail_64(dc)) {
2415         return false;
2416     }
2417     return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2418 }
2419 
2420 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2421 {
2422     if (avail_32(dc)) {
2423         return false;
2424     }
2425     return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2426 }
2427 
2428 static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
2429 {
2430     tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2431     return advance_pc(dc);
2432 }
2433 
2434 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2435 {
2436     if (avail_32(dc)) {
2437         return false;
2438     }
2439     if (a->mmask) {
2440         /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2441         tcg_gen_mb(a->mmask | TCG_BAR_SC);
2442     }
2443     if (a->cmask) {
2444         /* For #Sync, etc, end the TB to recognize interrupts. */
2445         dc->base.is_jmp = DISAS_EXIT;
2446     }
2447     return advance_pc(dc);
2448 }
2449 
2450 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2451                           TCGv (*func)(DisasContext *, TCGv))
2452 {
2453     if (!priv) {
2454         return raise_priv(dc);
2455     }
2456     gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2457     return advance_pc(dc);
2458 }
2459 
2460 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2461 {
2462     return cpu_y;
2463 }
2464 
2465 static bool trans_RDY(DisasContext *dc, arg_RDY *a)
2466 {
2467     /*
2468      * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
2469      * 32-bit cpus like sparcv7, which ignores the rs1 field.
2470      * This pattern matches after all other ASRs, so the Leon3 %asr17 case is handled first.
2471      */
2472     if (avail_64(dc) && a->rs1 != 0) {
2473         return false;
2474     }
2475     return do_rd_special(dc, true, a->rd, do_rdy);
2476 }
2477 
2478 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2479 {
2480     gen_helper_rdasr17(dst, tcg_env);
2481     return dst;
2482 }
2483 
2484 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2485 
2486 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2487 {
2488     gen_helper_rdccr(dst, tcg_env);
2489     return dst;
2490 }
2491 
2492 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2493 
2494 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2495 {
2496 #ifdef TARGET_SPARC64
2497     return tcg_constant_tl(dc->asi);
2498 #else
2499     qemu_build_not_reached();
2500 #endif
2501 }
2502 
2503 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2504 
2505 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2506 {
2507     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2508 
2509     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2510     if (translator_io_start(&dc->base)) {
2511         dc->base.is_jmp = DISAS_EXIT;
2512     }
2513     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2514                               tcg_constant_i32(dc->mem_idx));
2515     return dst;
2516 }
2517 
2518 /* TODO: non-priv access only allowed when enabled. */
2519 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2520 
2521 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2522 {
2523     return tcg_constant_tl(address_mask_i(dc, dc->pc));
2524 }
2525 
2526 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2527 
2528 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2529 {
2530     tcg_gen_ext_i32_tl(dst, cpu_fprs);
2531     return dst;
2532 }
2533 
2534 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2535 
2536 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2537 {
2538     gen_trap_ifnofpu(dc);
2539     return cpu_gsr;
2540 }
2541 
2542 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2543 
2544 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2545 {
2546     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2547     return dst;
2548 }
2549 
2550 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2551 
2552 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2553 {
2554     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2555     return dst;
2556 }
2557 
2558 /* TODO: non-priv access only allowed when enabled. */
2559 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2560 
2561 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2562 {
2563     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2564 
2565     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2566     if (translator_io_start(&dc->base)) {
2567         dc->base.is_jmp = DISAS_EXIT;
2568     }
2569     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2570                               tcg_constant_i32(dc->mem_idx));
2571     return dst;
2572 }
2573 
2574 /* TODO: non-priv access only allowed when enabled. */
2575 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2576 
2577 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
2578 {
2579     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
2580     return dst;
2581 }
2582 
2583 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2584 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2585 
2586 /*
2587  * UltraSPARC-T1 Strand status.
2588  * The HYPV check may not be enough: UA2005 & UA2007 describe
2589  * this ASR as implementation dependent.
2590  */
2591 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2592 {
2593     return tcg_constant_tl(1);
2594 }
2595 
2596 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2597 
2598 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
2599 {
2600     gen_helper_rdpsr(dst, tcg_env);
2601     return dst;
2602 }
2603 
2604 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2605 
2606 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
2607 {
2608     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
2609     return dst;
2610 }
2611 
2612 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2613 
2614 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2615 {
2616     TCGv_i32 tl = tcg_temp_new_i32();
2617     TCGv_ptr tp = tcg_temp_new_ptr();
2618 
2619     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2620     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2621     tcg_gen_shli_i32(tl, tl, 3);
2622     tcg_gen_ext_i32_ptr(tp, tl);
2623     tcg_gen_add_ptr(tp, tp, tcg_env);
2624 
2625     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2626     return dst;
2627 }
2628 
2629 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2630 
2631 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
2632 {
2633     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
2634     return dst;
2635 }
2636 
2637 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2638 
2639 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
2640 {
2641     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
2642     return dst;
2643 }
2644 
2645 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
2646 
2647 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
2648 {
2649     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
2650     return dst;
2651 }
2652 
2653 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
2654 
2655 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
2656 {
2657     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
2658     return dst;
2659 }
2660 
2661 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
2662       do_rdhstick_cmpr)
2663 
2664 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
2665 {
2666     tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
2667     return dst;
2668 }
2669 
2670 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
2671 
2672 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
2673 {
2674 #ifdef TARGET_SPARC64
2675     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2676 
2677     gen_load_trap_state_at_tl(r_tsptr);
2678     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
2679     return dst;
2680 #else
2681     qemu_build_not_reached();
2682 #endif
2683 }
2684 
2685 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2686 
2687 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
2688 {
2689 #ifdef TARGET_SPARC64
2690     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2691 
2692     gen_load_trap_state_at_tl(r_tsptr);
2693     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
2694     return dst;
2695 #else
2696     qemu_build_not_reached();
2697 #endif
2698 }
2699 
2700 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
2701 
2702 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
2703 {
2704 #ifdef TARGET_SPARC64
2705     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2706 
2707     gen_load_trap_state_at_tl(r_tsptr);
2708     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
2709     return dst;
2710 #else
2711     qemu_build_not_reached();
2712 #endif
2713 }
2714 
2715 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
2716 
2717 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
2718 {
2719 #ifdef TARGET_SPARC64
2720     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
2721 
2722     gen_load_trap_state_at_tl(r_tsptr);
2723     tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
2724     return dst;
2725 #else
2726     qemu_build_not_reached();
2727 #endif
2728 }
2729 
2730 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
2731 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2732 
2733 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
2734 {
2735     return cpu_tbr;
2736 }
2737 
2738 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2739 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
2740 
2741 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
2742 {
2743     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
2744     return dst;
2745 }
2746 
2747 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
2748 
2749 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
2750 {
2751     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
2752     return dst;
2753 }
2754 
2755 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
2756 
2757 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
2758 {
2759     tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
2760     return dst;
2761 }
2762 
2763 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2764 
2765 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
2766 {
2767     gen_helper_rdcwp(dst, tcg_env);
2768     return dst;
2769 }
2770 
2771 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
2772 
2773 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
2774 {
2775     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
2776     return dst;
2777 }
2778 
2779 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
2780 
2781 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
2782 {
2783     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
2784     return dst;
2785 }
2786 
2787 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
2788       do_rdcanrestore)
2789 
2790 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
2791 {
2792     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
2793     return dst;
2794 }
2795 
2796 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
2797 
2798 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
2799 {
2800     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
2801     return dst;
2802 }
2803 
2804 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
2805 
2806 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
2807 {
2808     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
2809     return dst;
2810 }
2811 
2812 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
2813 
2814 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
2815 {
2816     tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
2817     return dst;
2818 }
2819 
2820 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
2821 
2822 /* UA2005 strand status */
2823 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
2824 {
2825     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
2826     return dst;
2827 }
2828 
2829 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
2830 
2831 static TCGv do_rdver(DisasContext *dc, TCGv dst)
2832 {
2833     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
2834     return dst;
2835 }
2836 
2837 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
2838 
2839 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
2840 {
2841     if (avail_64(dc)) {
2842         gen_helper_flushw(tcg_env);
2843         return advance_pc(dc);
2844     }
2845     return false;
2846 }
2847 
2848 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
2849                           void (*func)(DisasContext *, TCGv))
2850 {
2851     TCGv src;
2852 
2853     /* For simplicity, we under-decoded the rs2 form. */
2854     if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
2855         return false;
2856     }
2857     if (!priv) {
2858         return raise_priv(dc);
2859     }
2860 
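    /*
     * Note the xor below: WRASR and friends are architecturally defined
     * to write r[rs1] XOR operand2 rather than a plain move.  With
     * rs1 == %g0 that reduces to operand2 itself, which the constant
     * case here exploits.
     */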
2861     if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
2862         src = tcg_constant_tl(a->rs2_or_imm);
2863     } else {
2864         TCGv src1 = gen_load_gpr(dc, a->rs1);
2865         if (a->rs2_or_imm == 0) {
2866             src = src1;
2867         } else {
2868             src = tcg_temp_new();
2869             if (a->imm) {
2870                 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
2871             } else {
2872                 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
2873             }
2874         }
2875     }
2876     func(dc, src);
2877     return advance_pc(dc);
2878 }
2879 
2880 static void do_wry(DisasContext *dc, TCGv src)
2881 {
2882     tcg_gen_ext32u_tl(cpu_y, src);
2883 }
2884 
2885 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
2886 
2887 static void do_wrccr(DisasContext *dc, TCGv src)
2888 {
2889     gen_helper_wrccr(tcg_env, src);
2890 }
2891 
2892 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
2893 
2894 static void do_wrasi(DisasContext *dc, TCGv src)
2895 {
2896     TCGv tmp = tcg_temp_new();
2897 
2898     tcg_gen_ext8u_tl(tmp, src);
2899     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
2900     /* End TB to notice changed ASI. */
2901     dc->base.is_jmp = DISAS_EXIT;
2902 }
2903 
2904 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
2905 
2906 static void do_wrfprs(DisasContext *dc, TCGv src)
2907 {
2908 #ifdef TARGET_SPARC64
2909     tcg_gen_trunc_tl_i32(cpu_fprs, src);
2910     dc->fprs_dirty = 0;
2911     dc->base.is_jmp = DISAS_EXIT;
2912 #else
2913     qemu_build_not_reached();
2914 #endif
2915 }
2916 
2917 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
2918 
2919 static void do_wrgsr(DisasContext *dc, TCGv src)
2920 {
2921     gen_trap_ifnofpu(dc);
2922     tcg_gen_mov_tl(cpu_gsr, src);
2923 }
2924 
2925 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
2926 
2927 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
2928 {
2929     gen_helper_set_softint(tcg_env, src);
2930 }
2931 
2932 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
2933 
2934 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
2935 {
2936     gen_helper_clear_softint(tcg_env, src);
2937 }
2938 
2939 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
2940 
2941 static void do_wrsoftint(DisasContext *dc, TCGv src)
2942 {
2943     gen_helper_write_softint(tcg_env, src);
2944 }
2945 
2946 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
2947 
2948 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
2949 {
2950     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2951 
2952     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
2953     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2954     translator_io_start(&dc->base);
2955     gen_helper_tick_set_limit(r_tickptr, src);
2956     /* End TB to handle timer interrupt */
2957     dc->base.is_jmp = DISAS_EXIT;
2958 }
2959 
2960 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
2961 
2962 static void do_wrstick(DisasContext *dc, TCGv src)
2963 {
2964 #ifdef TARGET_SPARC64
2965     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2966 
2967     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
2968     translator_io_start(&dc->base);
2969     gen_helper_tick_set_count(r_tickptr, src);
2970     /* End TB to handle timer interrupt */
2971     dc->base.is_jmp = DISAS_EXIT;
2972 #else
2973     qemu_build_not_reached();
2974 #endif
2975 }
2976 
2977 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
2978 
2979 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
2980 {
2981     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2982 
2983     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
2984     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2985     translator_io_start(&dc->base);
2986     gen_helper_tick_set_limit(r_tickptr, src);
2987     /* End TB to handle timer interrupt */
2988     dc->base.is_jmp = DISAS_EXIT;
2989 }
2990 
2991 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
2992 
2993 static void do_wrpowerdown(DisasContext *dc, TCGv src)
2994 {
2995     finishing_insn(dc);
2996     save_state(dc);
2997     gen_helper_power_down(tcg_env);
2998 }
2999 
3000 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3001 
3002 static void do_wrpsr(DisasContext *dc, TCGv src)
3003 {
3004     gen_helper_wrpsr(tcg_env, src);
3005     dc->base.is_jmp = DISAS_EXIT;
3006 }
3007 
3008 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3009 
3010 static void do_wrwim(DisasContext *dc, TCGv src)
3011 {
3012     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3013     TCGv tmp = tcg_temp_new();
3014 
3015     tcg_gen_andi_tl(tmp, src, mask);
3016     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3017 }
3018 
3019 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3020 
3021 static void do_wrtpc(DisasContext *dc, TCGv src)
3022 {
3023 #ifdef TARGET_SPARC64
3024     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3025 
3026     gen_load_trap_state_at_tl(r_tsptr);
3027     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3028 #else
3029     qemu_build_not_reached();
3030 #endif
3031 }
3032 
3033 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3034 
3035 static void do_wrtnpc(DisasContext *dc, TCGv src)
3036 {
3037 #ifdef TARGET_SPARC64
3038     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3039 
3040     gen_load_trap_state_at_tl(r_tsptr);
3041     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3042 #else
3043     qemu_build_not_reached();
3044 #endif
3045 }
3046 
3047 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3048 
3049 static void do_wrtstate(DisasContext *dc, TCGv src)
3050 {
3051 #ifdef TARGET_SPARC64
3052     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3053 
3054     gen_load_trap_state_at_tl(r_tsptr);
3055     tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3056 #else
3057     qemu_build_not_reached();
3058 #endif
3059 }
3060 
3061 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3062 
3063 static void do_wrtt(DisasContext *dc, TCGv src)
3064 {
3065 #ifdef TARGET_SPARC64
3066     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3067 
3068     gen_load_trap_state_at_tl(r_tsptr);
3069     tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3070 #else
3071     qemu_build_not_reached();
3072 #endif
3073 }
3074 
3075 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3076 
3077 static void do_wrtick(DisasContext *dc, TCGv src)
3078 {
3079     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3080 
3081     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3082     translator_io_start(&dc->base);
3083     gen_helper_tick_set_count(r_tickptr, src);
3084     /* End TB to handle timer interrupt */
3085     dc->base.is_jmp = DISAS_EXIT;
3086 }
3087 
3088 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3089 
3090 static void do_wrtba(DisasContext *dc, TCGv src)
3091 {
3092     tcg_gen_mov_tl(cpu_tbr, src);
3093 }
3094 
3095 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3096 
3097 static void do_wrpstate(DisasContext *dc, TCGv src)
3098 {
3099     save_state(dc);
3100     if (translator_io_start(&dc->base)) {
3101         dc->base.is_jmp = DISAS_EXIT;
3102     }
3103     gen_helper_wrpstate(tcg_env, src);
3104     dc->npc = DYNAMIC_PC;
3105 }
3106 
3107 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3108 
3109 static void do_wrtl(DisasContext *dc, TCGv src)
3110 {
3111     save_state(dc);
3112     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3113     dc->npc = DYNAMIC_PC;
3114 }
3115 
3116 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3117 
3118 static void do_wrpil(DisasContext *dc, TCGv src)
3119 {
3120     if (translator_io_start(&dc->base)) {
3121         dc->base.is_jmp = DISAS_EXIT;
3122     }
3123     gen_helper_wrpil(tcg_env, src);
3124 }
3125 
3126 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3127 
3128 static void do_wrcwp(DisasContext *dc, TCGv src)
3129 {
3130     gen_helper_wrcwp(tcg_env, src);
3131 }
3132 
3133 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3134 
3135 static void do_wrcansave(DisasContext *dc, TCGv src)
3136 {
3137     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3138 }
3139 
3140 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3141 
3142 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3143 {
3144     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3145 }
3146 
3147 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3148 
3149 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3150 {
3151     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3152 }
3153 
3154 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3155 
3156 static void do_wrotherwin(DisasContext *dc, TCGv src)
3157 {
3158     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3159 }
3160 
3161 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3162 
3163 static void do_wrwstate(DisasContext *dc, TCGv src)
3164 {
3165     tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3166 }
3167 
3168 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3169 
3170 static void do_wrgl(DisasContext *dc, TCGv src)
3171 {
3172     gen_helper_wrgl(tcg_env, src);
3173 }
3174 
3175 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3176 
3177 /* UA2005 strand status */
3178 static void do_wrssr(DisasContext *dc, TCGv src)
3179 {
3180     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3181 }
3182 
3183 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3184 
3185 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3186 
3187 static void do_wrhpstate(DisasContext *dc, TCGv src)
3188 {
3189     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3190     dc->base.is_jmp = DISAS_EXIT;
3191 }
3192 
3193 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3194 
3195 static void do_wrhtstate(DisasContext *dc, TCGv src)
3196 {
3197     TCGv_i32 tl = tcg_temp_new_i32();
3198     TCGv_ptr tp = tcg_temp_new_ptr();
3199 
3200     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3201     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3202     tcg_gen_shli_i32(tl, tl, 3);
3203     tcg_gen_ext_i32_ptr(tp, tl);
3204     tcg_gen_add_ptr(tp, tp, tcg_env);
3205 
3206     tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3207 }
3208 
3209 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3210 
3211 static void do_wrhintp(DisasContext *dc, TCGv src)
3212 {
3213     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3214 }
3215 
3216 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3217 
3218 static void do_wrhtba(DisasContext *dc, TCGv src)
3219 {
3220     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3221 }
3222 
3223 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3224 
3225 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3226 {
3227     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3228 
3229     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3230     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3231     translator_io_start(&dc->base);
3232     gen_helper_tick_set_limit(r_tickptr, src);
3233     /* End TB to handle timer interrupt */
3234     dc->base.is_jmp = DISAS_EXIT;
3235 }
3236 
3237 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3238       do_wrhstick_cmpr)
3239 
3240 static bool do_saved_restored(DisasContext *dc, bool saved)
3241 {
3242     if (!supervisor(dc)) {
3243         return raise_priv(dc);
3244     }
3245     if (saved) {
3246         gen_helper_saved(tcg_env);
3247     } else {
3248         gen_helper_restored(tcg_env);
3249     }
3250     return advance_pc(dc);
3251 }
3252 
3253 TRANS(SAVED, 64, do_saved_restored, true)
3254 TRANS(RESTORED, 64, do_saved_restored, false)
3255 
3256 static bool trans_NOP(DisasContext *dc, arg_NOP *a)
3257 {
3258     return advance_pc(dc);
3259 }
3260 
3261 /*
3262  * TODO: Need a feature bit for sparcv8.
3263  * In the meantime, treat all 32-bit cpus like sparcv7.
3264  */
3265 TRANS(NOP_v7, 32, trans_NOP, a)
3266 TRANS(NOP_v9, 64, trans_NOP, a)
3267 
3268 static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
3269                          void (*func)(TCGv, TCGv, TCGv),
3270                          void (*funci)(TCGv, TCGv, target_long),
3271                          bool logic_cc)
3272 {
3273     TCGv dst, src1;
3274 
3275     /* For simplicity, we under-decoded the rs2 form. */
3276     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3277         return false;
3278     }
3279 
3280     if (logic_cc) {
3281         dst = cpu_cc_N;
3282     } else {
3283         dst = gen_dest_gpr(dc, a->rd);
3284     }
3285     src1 = gen_load_gpr(dc, a->rs1);
3286 
3287     if (a->imm || a->rs2_or_imm == 0) {
3288         if (funci) {
3289             funci(dst, src1, a->rs2_or_imm);
3290         } else {
3291             func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
3292         }
3293     } else {
3294         func(dst, src1, cpu_regs[a->rs2_or_imm]);
3295     }
3296 
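    /*
     * Logical ops set N and Z from the result and clear C and V.  In
     * this translator's representation cpu_cc_Z holds a value that is
     * zero exactly when the Z flag is set, so the result written to
     * cpu_cc_N (aliased by dst above) can simply be copied into Z.
     */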
3297     if (logic_cc) {
3298         if (TARGET_LONG_BITS == 64) {
3299             tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
3300             tcg_gen_movi_tl(cpu_icc_C, 0);
3301         }
3302         tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
3303         tcg_gen_movi_tl(cpu_cc_C, 0);
3304         tcg_gen_movi_tl(cpu_cc_V, 0);
3305     }
3306 
3307     gen_store_gpr(dc, a->rd, dst);
3308     return advance_pc(dc);
3309 }
3310 
3311 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3312                      void (*func)(TCGv, TCGv, TCGv),
3313                      void (*funci)(TCGv, TCGv, target_long),
3314                      void (*func_cc)(TCGv, TCGv, TCGv))
3315 {
3316     if (a->cc) {
3317         return do_arith_int(dc, a, func_cc, NULL, false);
3318     }
3319     return do_arith_int(dc, a, func, funci, false);
3320 }
3321 
3322 static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
3323                      void (*func)(TCGv, TCGv, TCGv),
3324                      void (*funci)(TCGv, TCGv, target_long))
3325 {
3326     return do_arith_int(dc, a, func, funci, a->cc);
3327 }
3328 
3329 TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
3330 TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
3331 TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
3332 TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)
3333 
3334 TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
3335 TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
3336 TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
3337 TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)
3338 
3339 TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
3340 TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
3341 TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
3342 TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
3343 TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)
3344 
3345 TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
3346 TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
3347 TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
3348 TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)
3349 
3350 TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
3351 TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)
3352 
3353 /* TODO: Should have feature bit -- comes in with UltraSparc T2. */
3354 TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3355 
3356 static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
3357 {
3358     /* OR with %g0 is the canonical alias for MOV. */
3359     if (!a->cc && a->rs1 == 0) {
3360         if (a->imm || a->rs2_or_imm == 0) {
3361             gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
3362         } else if (a->rs2_or_imm & ~0x1f) {
3363             /* For simplicity, we under-decoded the rs2 form. */
3364             return false;
3365         } else {
3366             gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
3367         }
3368         return advance_pc(dc);
3369     }
3370     return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
3371 }
3372 
3373 static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
3374 {
3375     TCGv_i64 t1, t2;
3376     TCGv dst;
3377 
3378     if (!avail_DIV(dc)) {
3379         return false;
3380     }
3381     /* For simplicity, we under-decoded the rs2 form. */
3382     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3383         return false;
3384     }
3385 
3386     if (unlikely(a->rs2_or_imm == 0)) {
3387         gen_exception(dc, TT_DIV_ZERO);
3388         return true;
3389     }
3390 
3391     if (a->imm) {
3392         t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
3393     } else {
3394         TCGLabel *lab;
3395         TCGv_i32 n2;
3396 
3397         finishing_insn(dc);
3398         flush_cond(dc);
3399 
3400         n2 = tcg_temp_new_i32();
3401         tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);
3402 
3403         lab = delay_exception(dc, TT_DIV_ZERO);
3404         tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);
3405 
3406         t2 = tcg_temp_new_i64();
3407 #ifdef TARGET_SPARC64
3408         tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
3409 #else
3410         tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
3411 #endif
3412     }
3413 
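    /*
     * V8 UDIV divides the 64-bit value (Y:rs1) by the 32-bit divisor,
     * hence the concat of rs1 (low) with %y (high) below.  A quotient
     * that overflows 32 bits yields the architected result 0xffffffff,
     * which the umin provides.
     */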
3414     t1 = tcg_temp_new_i64();
3415     tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);
3416 
3417     tcg_gen_divu_i64(t1, t1, t2);
3418     tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));
3419 
3420     dst = gen_dest_gpr(dc, a->rd);
3421     tcg_gen_trunc_i64_tl(dst, t1);
3422     gen_store_gpr(dc, a->rd, dst);
3423     return advance_pc(dc);
3424 }
3425 
3426 static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
3427 {
3428     TCGv dst, src1, src2;
3429 
3430     if (!avail_64(dc)) {
3431         return false;
3432     }
3433     /* For simplicity, we under-decoded the rs2 form. */
3434     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3435         return false;
3436     }
3437 
3438     if (unlikely(a->rs2_or_imm == 0)) {
3439         gen_exception(dc, TT_DIV_ZERO);
3440         return true;
3441     }
3442 
3443     if (a->imm) {
3444         src2 = tcg_constant_tl(a->rs2_or_imm);
3445     } else {
3446         TCGLabel *lab;
3447 
3448         finishing_insn(dc);
3449         flush_cond(dc);
3450 
3451         lab = delay_exception(dc, TT_DIV_ZERO);
3452         src2 = cpu_regs[a->rs2_or_imm];
3453         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3454     }
3455 
3456     dst = gen_dest_gpr(dc, a->rd);
3457     src1 = gen_load_gpr(dc, a->rs1);
3458 
3459     tcg_gen_divu_tl(dst, src1, src2);
3460     gen_store_gpr(dc, a->rd, dst);
3461     return advance_pc(dc);
3462 }
3463 
3464 static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
3465 {
3466     TCGv dst, src1, src2;
3467 
3468     if (!avail_64(dc)) {
3469         return false;
3470     }
3471     /* For simplicity, we under-decoded the rs2 form. */
3472     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3473         return false;
3474     }
3475 
3476     if (unlikely(a->rs2_or_imm == 0)) {
3477         gen_exception(dc, TT_DIV_ZERO);
3478         return true;
3479     }
3480 
3481     dst = gen_dest_gpr(dc, a->rd);
3482     src1 = gen_load_gpr(dc, a->rs1);
3483 
3484     if (a->imm) {
3485         if (unlikely(a->rs2_or_imm == -1)) {
3486             tcg_gen_neg_tl(dst, src1);
3487             gen_store_gpr(dc, a->rd, dst);
3488             return advance_pc(dc);
3489         }
3490         src2 = tcg_constant_tl(a->rs2_or_imm);
3491     } else {
3492         TCGLabel *lab;
3493         TCGv t1, t2;
3494 
3495         finishing_insn(dc);
3496         flush_cond(dc);
3497 
3498         lab = delay_exception(dc, TT_DIV_ZERO);
3499         src2 = cpu_regs[a->rs2_or_imm];
3500         tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
3501 
3502         /*
3503          * Need to avoid INT64_MIN / -1, which traps on an x86 host; SPARC
3504          * defines the result as INT64_MIN, which dividing by 1 preserves.
3505          */
3506         t1 = tcg_temp_new();
3507         t2 = tcg_temp_new();
3508         tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
3509         tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
3510         tcg_gen_and_tl(t1, t1, t2);
3511         tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
3512                            tcg_constant_tl(1), src2);
3513         src2 = t1;
3514     }
3515 
3516     tcg_gen_div_tl(dst, src1, src2);
3517     gen_store_gpr(dc, a->rd, dst);
3518     return advance_pc(dc);
3519 }
3520 
3521 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3522                      int width, bool cc, bool left)
3523 {
3524     TCGv dst, s1, s2, lo1, lo2;
3525     uint64_t amask, tabl, tabr;
3526     int shift, imask, omask;
3527 
3528     dst = gen_dest_gpr(dc, a->rd);
3529     s1 = gen_load_gpr(dc, a->rs1);
3530     s2 = gen_load_gpr(dc, a->rs2);
3531 
3532     if (cc) {
3533         gen_op_subcc(cpu_cc_N, s1, s2);
3534     }
3535 
3536     /*
3537      * Theory of operation: there are two tables, left and right (not to
3538      * be confused with the left and right versions of the opcode).  These
3539      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3540      * these tables are loaded into two constants, TABL and TABR below.
3541      * The operation index = (input & imask) << shift calculates the index
3542      * into the constant, while val = (table >> index) & omask calculates
3543      * the value we're looking for.
3544      */
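         /*
          * Worked example (EDGE8, big-endian): with k the low three address
          * bits, the left mask is (tabl >> (k * 8)) & 0xff.  k = 1 gives
          * (0x0103070f1f3f7fff >> 8) & 0xff = 0x7f: all bytes except the
          * first, since mask bit 7 corresponds to byte 0.
          */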
3545     switch (width) {
3546     case 8:
3547         imask = 0x7;
3548         shift = 3;
3549         omask = 0xff;
3550         if (left) {
3551             tabl = 0x80c0e0f0f8fcfeffULL;
3552             tabr = 0xff7f3f1f0f070301ULL;
3553         } else {
3554             tabl = 0x0103070f1f3f7fffULL;
3555             tabr = 0xfffefcf8f0e0c080ULL;
3556         }
3557         break;
3558     case 16:
3559         imask = 0x6;
3560         shift = 1;
3561         omask = 0xf;
3562         if (left) {
3563             tabl = 0x8cef;
3564             tabr = 0xf731;
3565         } else {
3566             tabl = 0x137f;
3567             tabr = 0xfec8;
3568         }
3569         break;
3570     case 32:
3571         imask = 0x4;
3572         shift = 0;
3573         omask = 0x3;
3574         if (left) {
3575             tabl = (2 << 2) | 3;
3576             tabr = (3 << 2) | 1;
3577         } else {
3578             tabl = (1 << 2) | 3;
3579             tabr = (3 << 2) | 2;
3580         }
3581         break;
3582     default:
3583         abort();
3584     }
3585 
3586     lo1 = tcg_temp_new();
3587     lo2 = tcg_temp_new();
3588     tcg_gen_andi_tl(lo1, s1, imask);
3589     tcg_gen_andi_tl(lo2, s2, imask);
3590     tcg_gen_shli_tl(lo1, lo1, shift);
3591     tcg_gen_shli_tl(lo2, lo2, shift);
3592 
3593     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3594     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3595     tcg_gen_andi_tl(lo1, lo1, omask);
3596     tcg_gen_andi_tl(lo2, lo2, omask);
3597 
3598     amask = address_mask_i(dc, -8);
3599     tcg_gen_andi_tl(s1, s1, amask);
3600     tcg_gen_andi_tl(s2, s2, amask);
3601 
3602     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3603     tcg_gen_and_tl(lo2, lo2, lo1);
3604     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3605 
3606     gen_store_gpr(dc, a->rd, dst);
3607     return advance_pc(dc);
3608 }
3609 
3610 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3611 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3612 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3613 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3614 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3615 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3616 
3617 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3618 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3619 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3620 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3621 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3622 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3623 
3624 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3625                    void (*func)(TCGv, TCGv, TCGv))
3626 {
3627     TCGv dst = gen_dest_gpr(dc, a->rd);
3628     TCGv src1 = gen_load_gpr(dc, a->rs1);
3629     TCGv src2 = gen_load_gpr(dc, a->rs2);
3630 
3631     func(dst, src1, src2);
3632     gen_store_gpr(dc, a->rd, dst);
3633     return advance_pc(dc);
3634 }
3635 
3636 TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
3637 TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
3638 TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3639 
3640 static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
3641 {
3642 #ifdef TARGET_SPARC64
3643     TCGv tmp = tcg_temp_new();
3644 
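         /* ALIGNADDRESS: dst = (s1 + s2) & ~7; GSR.align = (s1 + s2) & 7. */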
3645     tcg_gen_add_tl(tmp, s1, s2);
3646     tcg_gen_andi_tl(dst, tmp, -8);
3647     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3648 #else
3649     g_assert_not_reached();
3650 #endif
3651 }
3652 
3653 static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
3654 {
3655 #ifdef TARGET_SPARC64
3656     TCGv tmp = tcg_temp_new();
3657 
3658     tcg_gen_add_tl(tmp, s1, s2);
3659     tcg_gen_andi_tl(dst, tmp, -8);
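         /* The _LITTLE variant stores the negated offset in GSR.align. */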
3660     tcg_gen_neg_tl(tmp, tmp);
3661     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
3662 #else
3663     g_assert_not_reached();
3664 #endif
3665 }
3666 
3667 TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
3668 TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3669 
3670 static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
3671 {
3672 #ifdef TARGET_SPARC64
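         /* BMASK: dst = s1 + s2; the low 32 bits of the sum also land in
          * GSR bits 63:32, the mask consumed by BSHUFFLE. */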
3673     tcg_gen_add_tl(dst, s1, s2);
3674     tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
3675 #else
3676     g_assert_not_reached();
3677 #endif
3678 }
3679 
3680 TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3681 
3682 static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
3683 {
3684     TCGv dst, src1, src2;
3685 
3686     /* Reject 64-bit shifts for sparc32. */
3687     if (avail_32(dc) && a->x) {
3688         return false;
3689     }
3690 
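         /* The shift count uses only the low 5 bits (6 for 64-bit shifts). */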
3691     src2 = tcg_temp_new();
3692     tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
3693     src1 = gen_load_gpr(dc, a->rs1);
3694     dst = gen_dest_gpr(dc, a->rd);
3695 
3696     if (l) {
3697         tcg_gen_shl_tl(dst, src1, src2);
3698         if (!a->x) {
3699             tcg_gen_ext32u_tl(dst, dst);
3700         }
3701     } else if (u) {
3702         if (!a->x) {
3703             tcg_gen_ext32u_tl(dst, src1);
3704             src1 = dst;
3705         }
3706         tcg_gen_shr_tl(dst, src1, src2);
3707     } else {
3708         if (!a->x) {
3709             tcg_gen_ext32s_tl(dst, src1);
3710             src1 = dst;
3711         }
3712         tcg_gen_sar_tl(dst, src1, src2);
3713     }
3714     gen_store_gpr(dc, a->rd, dst);
3715     return advance_pc(dc);
3716 }
3717 
3718 TRANS(SLL_r, ALL, do_shift_r, a, true, true)
3719 TRANS(SRL_r, ALL, do_shift_r, a, false, true)
3720 TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3721 
3722 static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
3723 {
3724     TCGv dst, src1;
3725 
3726     /* Reject 64-bit shifts for sparc32. */
3727     if (avail_32(dc) && (a->x || a->i >= 32)) {
3728         return false;
3729     }
3730 
3731     src1 = gen_load_gpr(dc, a->rs1);
3732     dst = gen_dest_gpr(dc, a->rd);
3733 
3734     if (avail_32(dc) || a->x) {
3735         if (l) {
3736             tcg_gen_shli_tl(dst, src1, a->i);
3737         } else if (u) {
3738             tcg_gen_shri_tl(dst, src1, a->i);
3739         } else {
3740             tcg_gen_sari_tl(dst, src1, a->i);
3741         }
3742     } else {
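             /*
              * 32-bit shift on a 64-bit cpu: the deposit/extract forms do
              * the shift and the zero- or sign-extension of the 32-bit
              * result in a single operation.
              */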
3743         if (l) {
3744             tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
3745         } else if (u) {
3746             tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
3747         } else {
3748             tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
3749         }
3750     }
3751     gen_store_gpr(dc, a->rd, dst);
3752     return advance_pc(dc);
3753 }
3754 
3755 TRANS(SLL_i, ALL, do_shift_i, a, true, true)
3756 TRANS(SRL_i, ALL, do_shift_i, a, false, true)
3757 TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3758 
3759 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3760 {
3761     /* For simplicity, we under-decoded the rs2 form. */
3762     if (!imm && rs2_or_imm & ~0x1f) {
3763         return NULL;
3764     }
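         /* %g0 reads as zero and has no backing global; fold it to 0. */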
3765     if (imm || rs2_or_imm == 0) {
3766         return tcg_constant_tl(rs2_or_imm);
3767     } else {
3768         return cpu_regs[rs2_or_imm];
3769     }
3770 }
3771 
3772 static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
3773 {
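         /* rd is read as well as written: it supplies the "else" value. */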
3774     TCGv dst = gen_load_gpr(dc, rd);
3775     TCGv c2 = tcg_constant_tl(cmp->c2);
3776 
3777     tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
3778     gen_store_gpr(dc, rd, dst);
3779     return advance_pc(dc);
3780 }
3781 
3782 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
3783 {
3784     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3785     DisasCompare cmp;
3786 
3787     if (src2 == NULL) {
3788         return false;
3789     }
3790     gen_compare(&cmp, a->cc, a->cond, dc);
3791     return do_mov_cond(dc, &cmp, a->rd, src2);
3792 }
3793 
3794 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
3795 {
3796     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3797     DisasCompare cmp;
3798 
3799     if (src2 == NULL) {
3800         return false;
3801     }
3802     gen_fcompare(&cmp, a->cc, a->cond);
3803     return do_mov_cond(dc, &cmp, a->rd, src2);
3804 }
3805 
3806 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
3807 {
3808     TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
3809     DisasCompare cmp;
3810 
3811     if (src2 == NULL) {
3812         return false;
3813     }
3814     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
3815         return false;
3816     }
3817     return do_mov_cond(dc, &cmp, a->rd, src2);
3818 }
3819 
3820 static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
3821                            bool (*func)(DisasContext *dc, int rd, TCGv src))
3822 {
3823     TCGv src1, sum;
3824 
3825     /* For simplicity, we under-decoded the rs2 form. */
3826     if (!a->imm && a->rs2_or_imm & ~0x1f) {
3827         return false;
3828     }
3829 
3830     /*
3831      * Always load the sum into a new temporary; otherwise the copy
3832      * could be optimized away, and the value would not be captured
3833      * across a window change, e.g. SAVE and RESTORE.
3834      */
3835     sum = tcg_temp_new();
3836     src1 = gen_load_gpr(dc, a->rs1);
3837     if (a->imm || a->rs2_or_imm == 0) {
3838         tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
3839     } else {
3840         tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
3841     }
3842     return func(dc, a->rd, sum);
3843 }
3844 
3845 static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
3846 {
3847     /*
3848      * Preserve pc across the advance (gen_mov_pc_npc changes dc->pc) so
3849      * the return-address write to rd can wait until src is consumed.
3850      */
3851     target_ulong cur_pc = dc->pc;
3852 
3853     gen_check_align(dc, src, 3);
3854 
3855     gen_mov_pc_npc(dc);
3856     tcg_gen_mov_tl(cpu_npc, src);
3857     gen_address_mask(dc, cpu_npc);
3858     gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));
3859 
3860     dc->npc = DYNAMIC_PC_LOOKUP;
3861     return true;
3862 }
3863 
3864 TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
3865 
3866 static bool do_rett(DisasContext *dc, int rd, TCGv src)
3867 {
3868     if (!supervisor(dc)) {
3869         return raise_priv(dc);
3870     }
3871 
3872     gen_check_align(dc, src, 3);
3873 
3874     gen_mov_pc_npc(dc);
3875     tcg_gen_mov_tl(cpu_npc, src);
3876     gen_helper_rett(tcg_env);
3877 
3878     dc->npc = DYNAMIC_PC;
3879     return true;
3880 }
3881 
3882 TRANS(RETT, 32, do_add_special, a, do_rett)
3883 
3884 static bool do_return(DisasContext *dc, int rd, TCGv src)
3885 {
3886     gen_check_align(dc, src, 3);
3887     gen_helper_restore(tcg_env);
3888 
3889     gen_mov_pc_npc(dc);
3890     tcg_gen_mov_tl(cpu_npc, src);
3891     gen_address_mask(dc, cpu_npc);
3892 
3893     dc->npc = DYNAMIC_PC_LOOKUP;
3894     return true;
3895 }
3896 
3897 TRANS(RETURN, 64, do_add_special, a, do_return)
3898 
3899 static bool do_save(DisasContext *dc, int rd, TCGv src)
3900 {
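         /* src was computed in the old window; rd is written in the new. */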
3901     gen_helper_save(tcg_env);
3902     gen_store_gpr(dc, rd, src);
3903     return advance_pc(dc);
3904 }
3905 
3906 TRANS(SAVE, ALL, do_add_special, a, do_save)
3907 
3908 static bool do_restore(DisasContext *dc, int rd, TCGv src)
3909 {
3910     gen_helper_restore(tcg_env);
3911     gen_store_gpr(dc, rd, src);
3912     return advance_pc(dc);
3913 }
3914 
3915 TRANS(RESTORE, ALL, do_add_special, a, do_restore)
3916 
3917 static bool do_done_retry(DisasContext *dc, bool done)
3918 {
3919     if (!supervisor(dc)) {
3920         return raise_priv(dc);
3921     }
3922     dc->npc = DYNAMIC_PC;
3923     dc->pc = DYNAMIC_PC;
3924     translator_io_start(&dc->base);
3925     if (done) {
3926         gen_helper_done(tcg_env);
3927     } else {
3928         gen_helper_retry(tcg_env);
3929     }
3930     return true;
3931 }
3932 
3933 TRANS(DONE, 64, do_done_retry, true)
3934 TRANS(RETRY, 64, do_done_retry, false)
3935 
3936 /*
3937  * Major opcode 11 -- load and store instructions
3938  */
3939 
3940 static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
3941 {
3942     TCGv addr, tmp = NULL;
3943 
3944     /* For simplicity, we under-decoded the rs2 form. */
3945     if (!imm && rs2_or_imm & ~0x1f) {
3946         return NULL;
3947     }
3948 
3949     addr = gen_load_gpr(dc, rs1);
3950     if (rs2_or_imm) {
3951         tmp = tcg_temp_new();
3952         if (imm) {
3953             tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
3954         } else {
3955             tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
3956         }
3957         addr = tmp;
3958     }
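         /* Apply the 32-bit address mask, if in effect, to the result. */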
3959     if (AM_CHECK(dc)) {
3960         if (!tmp) {
3961             tmp = tcg_temp_new();
3962         }
3963         tcg_gen_ext32u_tl(tmp, addr);
3964         addr = tmp;
3965     }
3966     return addr;
3967 }
3968 
3969 static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
3970 {
3971     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
3972     DisasASI da;
3973 
3974     if (addr == NULL) {
3975         return false;
3976     }
3977     da = resolve_asi(dc, a->asi, mop);
3978 
3979     reg = gen_dest_gpr(dc, a->rd);
3980     gen_ld_asi(dc, &da, reg, addr);
3981     gen_store_gpr(dc, a->rd, reg);
3982     return advance_pc(dc);
3983 }
3984 
3985 TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
3986 TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
3987 TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
3988 TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
3989 TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
3990 TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
3991 TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
3992 
3993 static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
3994 {
3995     TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
3996     DisasASI da;
3997 
3998     if (addr == NULL) {
3999         return false;
4000     }
4001     da = resolve_asi(dc, a->asi, mop);
4002 
4003     reg = gen_load_gpr(dc, a->rd);
4004     gen_st_asi(dc, &da, reg, addr);
4005     return advance_pc(dc);
4006 }
4007 
4008 TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
4009 TRANS(STB, ALL, do_st_gpr, a, MO_UB)
4010 TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
4011 TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4012 
4013 static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
4014 {
4015     TCGv addr;
4016     DisasASI da;
4017 
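         /* LDD requires an even rd; odd register encodings are illegal. */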
4018     if (a->rd & 1) {
4019         return false;
4020     }
4021     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4022     if (addr == NULL) {
4023         return false;
4024     }
4025     da = resolve_asi(dc, a->asi, MO_TEUQ);
4026     gen_ldda_asi(dc, &da, addr, a->rd);
4027     return advance_pc(dc);
4028 }
4029 
4030 static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
4031 {
4032     TCGv addr;
4033     DisasASI da;
4034 
4035     if (a->rd & 1) {
4036         return false;
4037     }
4038     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4039     if (addr == NULL) {
4040         return false;
4041     }
4042     da = resolve_asi(dc, a->asi, MO_TEUQ);
4043     gen_stda_asi(dc, &da, addr, a->rd);
4044     return advance_pc(dc);
4045 }
4046 
4047 static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
4048 {
4049     TCGv addr, reg;
4050     DisasASI da;
4051 
4052     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4053     if (addr == NULL) {
4054         return false;
4055     }
4056     da = resolve_asi(dc, a->asi, MO_UB);
4057 
4058     reg = gen_dest_gpr(dc, a->rd);
4059     gen_ldstub_asi(dc, &da, reg, addr);
4060     gen_store_gpr(dc, a->rd, reg);
4061     return advance_pc(dc);
4062 }
4063 
4064 static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
4065 {
4066     TCGv addr, dst, src;
4067     DisasASI da;
4068 
4069     addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4070     if (addr == NULL) {
4071         return false;
4072     }
4073     da = resolve_asi(dc, a->asi, MO_TEUL);
4074 
4075     dst = gen_dest_gpr(dc, a->rd);
4076     src = gen_load_gpr(dc, a->rd);
4077     gen_swap_asi(dc, &da, dst, src, addr);
4078     gen_store_gpr(dc, a->rd, dst);
4079     return advance_pc(dc);
4080 }
4081 
4082 static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
4083 {
4084     TCGv addr, o, n, c;
4085     DisasASI da;
4086 
4087     addr = gen_ldst_addr(dc, a->rs1, true, 0);
4088     if (addr == NULL) {
4089         return false;
4090     }
4091     da = resolve_asi(dc, a->asi, mop);
4092 
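         /*
          * CASA compares r[rs2] with the memory word and stores r[rd] on a
          * match; either way, r[rd] receives the old memory value.
          */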
4093     o = gen_dest_gpr(dc, a->rd);
4094     n = gen_load_gpr(dc, a->rd);
4095     c = gen_load_gpr(dc, a->rs2_or_imm);
4096     gen_cas_asi(dc, &da, o, n, c, addr);
4097     gen_store_gpr(dc, a->rd, o);
4098     return advance_pc(dc);
4099 }
4100 
4101 TRANS(CASA, CASA, do_casa, a, MO_TEUL)
4102 TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4103 
4104 static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4105 {
4106     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4107     DisasASI da;
4108 
4109     if (addr == NULL) {
4110         return false;
4111     }
4112     if (gen_trap_ifnofpu(dc)) {
4113         return true;
4114     }
4115     if (sz == MO_128 && gen_trap_float128(dc)) {
4116         return true;
4117     }
4118     da = resolve_asi(dc, a->asi, MO_TE | sz);
4119     gen_ldf_asi(dc, &da, sz, addr, a->rd);
4120     gen_update_fprs_dirty(dc, a->rd);
4121     return advance_pc(dc);
4122 }
4123 
4124 TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
4125 TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
4126 TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)
4127 
4128 TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
4129 TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
4130 TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4131 
4132 static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
4133 {
4134     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4135     DisasASI da;
4136 
4137     if (addr == NULL) {
4138         return false;
4139     }
4140     if (gen_trap_ifnofpu(dc)) {
4141         return true;
4142     }
4143     if (sz == MO_128 && gen_trap_float128(dc)) {
4144         return true;
4145     }
4146     da = resolve_asi(dc, a->asi, MO_TE | sz);
4147     gen_stf_asi(dc, &da, sz, addr, a->rd);
4148     return advance_pc(dc);
4149 }
4150 
4151 TRANS(STF, ALL, do_st_fpr, a, MO_32)
4152 TRANS(STDF, ALL, do_st_fpr, a, MO_64)
4153 TRANS(STQF, ALL, do_st_fpr, a, MO_128)
4154 
4155 TRANS(STFA, 64, do_st_fpr, a, MO_32)
4156 TRANS(STDFA, 64, do_st_fpr, a, MO_64)
4157 TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4158 
4159 static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
4160 {
4161     if (!avail_32(dc)) {
4162         return false;
4163     }
4164     if (!supervisor(dc)) {
4165         return raise_priv(dc);
4166     }
4167     if (gen_trap_ifnofpu(dc)) {
4168         return true;
4169     }
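         /* Without a deferred-trap FP queue to store, STDFQ always raises
          * an fp exception with FTT = sequence_error. */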
4170     gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
4171     return true;
4172 }
4173 
4174 static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
4175 {
4176     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4177     TCGv_i32 tmp;
4178 
4179     if (addr == NULL) {
4180         return false;
4181     }
4182     if (gen_trap_ifnofpu(dc)) {
4183         return true;
4184     }
4185 
4186     tmp = tcg_temp_new_i32();
4187     tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);
4188 
4189     tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
4190     /* LDFSR does not change FCC[1-3]. */
4191 
4192     gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
4193     return advance_pc(dc);
4194 }
4195 
4196 static bool trans_LDXFSR(DisasContext *dc, arg_r_r_ri *a)
4197 {
4198 #ifdef TARGET_SPARC64
4199     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4200     TCGv_i64 t64;
4201     TCGv_i32 lo, hi;
4202 
4203     if (addr == NULL) {
4204         return false;
4205     }
4206     if (gen_trap_ifnofpu(dc)) {
4207         return true;
4208     }
4209 
4210     t64 = tcg_temp_new_i64();
4211     tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);
4212 
4213     lo = tcg_temp_new_i32();
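         /* Use cpu_fcc[3] itself as scratch for the high 32 bits: it is
          * extracted last below, so clobbering it here is safe. */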
4214     hi = cpu_fcc[3];
4215     tcg_gen_extr_i64_i32(lo, hi, t64);
4216     tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
4217     tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
4218     tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
4219     tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);
4220 
4221     gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
4222     return advance_pc(dc);
4223 #else
4224     return false;
4225 #endif
4226 }
4227 
4228 static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
4229 {
4230     TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
4231     TCGv fsr;
4232 
4233     if (addr == NULL) {
4234         return false;
4235     }
4236     if (gen_trap_ifnofpu(dc)) {
4237         return true;
4238     }
4239 
4240     fsr = tcg_temp_new();
4241     gen_helper_get_fsr(fsr, tcg_env);
4242     tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
4243     return advance_pc(dc);
4244 }
4245 
4246 TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
4247 TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4248 
4249 static bool do_fc(DisasContext *dc, int rd, bool c)
4250 {
4251     uint64_t mask;
4252 
4253     if (gen_trap_ifnofpu(dc)) {
4254         return true;
4255     }
4256 
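         /*
          * Single-precision registers are packed in i64 pairs with the
          * even register in the high half; pick the matching 32 bits.
          */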
4257     if (rd & 1) {
4258         mask = MAKE_64BIT_MASK(0, 32);
4259     } else {
4260         mask = MAKE_64BIT_MASK(32, 32);
4261     }
4262     if (c) {
4263         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4264     } else {
4265         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4266     }
4267     gen_update_fprs_dirty(dc, rd);
4268     return advance_pc(dc);
4269 }
4270 
4271 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4272 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4273 
4274 static bool do_dc(DisasContext *dc, int rd, int64_t c)
4275 {
4276     if (gen_trap_ifnofpu(dc)) {
4277         return true;
4278     }
4279 
4280     tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
4281     gen_update_fprs_dirty(dc, rd);
4282     return advance_pc(dc);
4283 }
4284 
4285 TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
4286 TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4287 
4288 static bool do_ff(DisasContext *dc, arg_r_r *a,
4289                   void (*func)(TCGv_i32, TCGv_i32))
4290 {
4291     TCGv_i32 tmp;
4292 
4293     if (gen_trap_ifnofpu(dc)) {
4294         return true;
4295     }
4296 
4297     tmp = gen_load_fpr_F(dc, a->rs);
4298     func(tmp, tmp);
4299     gen_store_fpr_F(dc, a->rd, tmp);
4300     return advance_pc(dc);
4301 }
4302 
4303 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4304 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4305 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4306 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4307 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4308 
4309 static bool do_fd(DisasContext *dc, arg_r_r *a,
4310                   void (*func)(TCGv_i32, TCGv_i64))
4311 {
4312     TCGv_i32 dst;
4313     TCGv_i64 src;
4314 
4315     if (gen_trap_ifnofpu(dc)) {
4316         return true;
4317     }
4318 
4319     dst = tcg_temp_new_i32();
4320     src = gen_load_fpr_D(dc, a->rs);
4321     func(dst, src);
4322     gen_store_fpr_F(dc, a->rd, dst);
4323     return advance_pc(dc);
4324 }
4325 
4326 TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
4327 TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4328 
4329 static bool do_env_ff(DisasContext *dc, arg_r_r *a,
4330                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
4331 {
4332     TCGv_i32 tmp;
4333 
4334     if (gen_trap_ifnofpu(dc)) {
4335         return true;
4336     }
4337 
4338     tmp = gen_load_fpr_F(dc, a->rs);
4339     func(tmp, tcg_env, tmp);
4340     gen_store_fpr_F(dc, a->rd, tmp);
4341     return advance_pc(dc);
4342 }
4343 
4344 TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
4345 TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
4346 TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4347 
4348 static bool do_env_fd(DisasContext *dc, arg_r_r *a,
4349                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
4350 {
4351     TCGv_i32 dst;
4352     TCGv_i64 src;
4353 
4354     if (gen_trap_ifnofpu(dc)) {
4355         return true;
4356     }
4357 
4358     dst = tcg_temp_new_i32();
4359     src = gen_load_fpr_D(dc, a->rs);
4360     func(dst, tcg_env, src);
4361     gen_store_fpr_F(dc, a->rd, dst);
4362     return advance_pc(dc);
4363 }
4364 
4365 TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
4366 TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
4367 TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4368 
4369 static bool do_dd(DisasContext *dc, arg_r_r *a,
4370                   void (*func)(TCGv_i64, TCGv_i64))
4371 {
4372     TCGv_i64 dst, src;
4373 
4374     if (gen_trap_ifnofpu(dc)) {
4375         return true;
4376     }
4377 
4378     dst = gen_dest_fpr_D(dc, a->rd);
4379     src = gen_load_fpr_D(dc, a->rs);
4380     func(dst, src);
4381     gen_store_fpr_D(dc, a->rd, dst);
4382     return advance_pc(dc);
4383 }
4384 
4385 TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
4386 TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
4387 TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
4388 TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
4389 TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4390 
4391 static bool do_env_dd(DisasContext *dc, arg_r_r *a,
4392                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
4393 {
4394     TCGv_i64 dst, src;
4395 
4396     if (gen_trap_ifnofpu(dc)) {
4397         return true;
4398     }
4399 
4400     dst = gen_dest_fpr_D(dc, a->rd);
4401     src = gen_load_fpr_D(dc, a->rs);
4402     func(dst, tcg_env, src);
4403     gen_store_fpr_D(dc, a->rd, dst);
4404     return advance_pc(dc);
4405 }
4406 
4407 TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
4408 TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
4409 TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4410 
4411 static bool do_df(DisasContext *dc, arg_r_r *a,
4412                   void (*func)(TCGv_i64, TCGv_i32))
4413 {
4414     TCGv_i64 dst;
4415     TCGv_i32 src;
4416 
4417     if (gen_trap_ifnofpu(dc)) {
4418         return true;
4419     }
4420 
4421     dst = tcg_temp_new_i64();
4422     src = gen_load_fpr_F(dc, a->rs);
4423     func(dst, src);
4424     gen_store_fpr_D(dc, a->rd, dst);
4425     return advance_pc(dc);
4426 }
4427 
4428 TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4429 
4430 static bool do_env_df(DisasContext *dc, arg_r_r *a,
4431                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
4432 {
4433     TCGv_i64 dst;
4434     TCGv_i32 src;
4435 
4436     if (gen_trap_ifnofpu(dc)) {
4437         return true;
4438     }
4439 
4440     dst = gen_dest_fpr_D(dc, a->rd);
4441     src = gen_load_fpr_F(dc, a->rs);
4442     func(dst, tcg_env, src);
4443     gen_store_fpr_D(dc, a->rd, dst);
4444     return advance_pc(dc);
4445 }
4446 
4447 TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
4448 TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
4449 TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4450 
4451 static bool do_qq(DisasContext *dc, arg_r_r *a,
4452                   void (*func)(TCGv_i128, TCGv_i128))
4453 {
4454     TCGv_i128 t;
4455 
4456     if (gen_trap_ifnofpu(dc)) {
4457         return true;
4458     }
4459     if (gen_trap_float128(dc)) {
4460         return true;
4461     }
4462 
4463     gen_op_clear_ieee_excp_and_FTT();
4464     t = gen_load_fpr_Q(dc, a->rs);
4465     func(t, t);
4466     gen_store_fpr_Q(dc, a->rd, t);
4467     return advance_pc(dc);
4468 }
4469 
4470 TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
4471 TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
4472 TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4473 
4474 static bool do_env_qq(DisasContext *dc, arg_r_r *a,
4475                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
4476 {
4477     TCGv_i128 t;
4478 
4479     if (gen_trap_ifnofpu(dc)) {
4480         return true;
4481     }
4482     if (gen_trap_float128(dc)) {
4483         return true;
4484     }
4485 
4486     t = gen_load_fpr_Q(dc, a->rs);
4487     func(t, tcg_env, t);
4488     gen_store_fpr_Q(dc, a->rd, t);
4489     return advance_pc(dc);
4490 }
4491 
4492 TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4493 
4494 static bool do_env_fq(DisasContext *dc, arg_r_r *a,
4495                       void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
4496 {
4497     TCGv_i128 src;
4498     TCGv_i32 dst;
4499 
4500     if (gen_trap_ifnofpu(dc)) {
4501         return true;
4502     }
4503     if (gen_trap_float128(dc)) {
4504         return true;
4505     }
4506 
4507     src = gen_load_fpr_Q(dc, a->rs);
4508     dst = tcg_temp_new_i32();
4509     func(dst, tcg_env, src);
4510     gen_store_fpr_F(dc, a->rd, dst);
4511     return advance_pc(dc);
4512 }
4513 
4514 TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
4515 TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4516 
4517 static bool do_env_dq(DisasContext *dc, arg_r_r *a,
4518                       void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
4519 {
4520     TCGv_i128 src;
4521     TCGv_i64 dst;
4522 
4523     if (gen_trap_ifnofpu(dc)) {
4524         return true;
4525     }
4526     if (gen_trap_float128(dc)) {
4527         return true;
4528     }
4529 
4530     src = gen_load_fpr_Q(dc, a->rs);
4531     dst = gen_dest_fpr_D(dc, a->rd);
4532     func(dst, tcg_env, src);
4533     gen_store_fpr_D(dc, a->rd, dst);
4534     return advance_pc(dc);
4535 }
4536 
4537 TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
4538 TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4539 
4540 static bool do_env_qf(DisasContext *dc, arg_r_r *a,
4541                       void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
4542 {
4543     TCGv_i32 src;
4544     TCGv_i128 dst;
4545 
4546     if (gen_trap_ifnofpu(dc)) {
4547         return true;
4548     }
4549     if (gen_trap_float128(dc)) {
4550         return true;
4551     }
4552 
4553     src = gen_load_fpr_F(dc, a->rs);
4554     dst = tcg_temp_new_i128();
4555     func(dst, tcg_env, src);
4556     gen_store_fpr_Q(dc, a->rd, dst);
4557     return advance_pc(dc);
4558 }
4559 
4560 TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
4561 TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4562 
4563 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
4564                       void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
4565 {
4566     TCGv_i64 src;
4567     TCGv_i128 dst;
4568 
4569     if (gen_trap_ifnofpu(dc)) {
4570         return true;
4571     }
4572     if (gen_trap_float128(dc)) {
4573         return true;
4574     }
4575 
4576     src = gen_load_fpr_D(dc, a->rs);
4577     dst = tcg_temp_new_i128();
4578     func(dst, tcg_env, src);
4579     gen_store_fpr_Q(dc, a->rd, dst);
4580     return advance_pc(dc);
4581 }
4582 
4583 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
4584 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4585 
4586 static bool do_fff(DisasContext *dc, arg_r_r_r *a,
4587                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
4588 {
4589     TCGv_i32 src1, src2;
4590 
4591     if (gen_trap_ifnofpu(dc)) {
4592         return true;
4593     }
4594 
4595     src1 = gen_load_fpr_F(dc, a->rs1);
4596     src2 = gen_load_fpr_F(dc, a->rs2);
4597     func(src1, src1, src2);
4598     gen_store_fpr_F(dc, a->rd, src1);
4599     return advance_pc(dc);
4600 }
4601 
4602 TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
4603 TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
4604 TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
4605 TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
4606 TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
4607 TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
4608 TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
4609 TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
4610 TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
4611 TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
4612 TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
4613 TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4614 
4615 static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
4616                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
4617 {
4618     TCGv_i32 src1, src2;
4619 
4620     if (gen_trap_ifnofpu(dc)) {
4621         return true;
4622     }
4623 
4624     src1 = gen_load_fpr_F(dc, a->rs1);
4625     src2 = gen_load_fpr_F(dc, a->rs2);
4626     func(src1, tcg_env, src1, src2);
4627     gen_store_fpr_F(dc, a->rd, src1);
4628     return advance_pc(dc);
4629 }
4630 
4631 TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
4632 TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
4633 TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
4634 TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4635 
4636 static bool do_dff(DisasContext *dc, arg_r_r_r *a,
4637                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
4638 {
4639     TCGv_i64 dst;
4640     TCGv_i32 src1, src2;
4641 
4642     if (gen_trap_ifnofpu(dc)) {
4643         return true;
4644     }
4645 
4646     dst = gen_dest_fpr_D(dc, a->rd);
4647     src1 = gen_load_fpr_F(dc, a->rs1);
4648     src2 = gen_load_fpr_F(dc, a->rs2);
4649     func(dst, src1, src2);
4650     gen_store_fpr_D(dc, a->rd, dst);
4651     return advance_pc(dc);
4652 }
4653 
4654 TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
4655 TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
4656 TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
4657 TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
4658 TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
4659 
4660 static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
4661                    void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
4662 {
4663     TCGv_i64 dst, src2;
4664     TCGv_i32 src1;
4665 
4666     if (gen_trap_ifnofpu(dc)) {
4667         return true;
4668     }
4669 
4670     dst = gen_dest_fpr_D(dc, a->rd);
4671     src1 = gen_load_fpr_F(dc, a->rs1);
4672     src2 = gen_load_fpr_D(dc, a->rs2);
4673     func(dst, src1, src2);
4674     gen_store_fpr_D(dc, a->rd, dst);
4675     return advance_pc(dc);
4676 }
4677 
4678 TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
4679 
4680 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
4681                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
4682 {
4683     TCGv_i64 dst, src1, src2;
4684 
4685     if (gen_trap_ifnofpu(dc)) {
4686         return true;
4687     }
4688 
4689     dst = gen_dest_fpr_D(dc, a->rd);
4690     src1 = gen_load_fpr_D(dc, a->rs1);
4691     src2 = gen_load_fpr_D(dc, a->rs2);
4692     func(dst, src1, src2);
4693     gen_store_fpr_D(dc, a->rd, dst);
4694     return advance_pc(dc);
4695 }
4696 
4697 TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
4698 TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
4699 
4700 TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
4701 TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
4702 TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
4703 TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
4704 TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
4705 TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
4706 TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
4707 TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
4708 TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
4709 TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
4710 TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
4711 TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)
4712 
4713 TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
4714 TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
4715 TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4716 
4717 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
4718                    void (*func)(TCGv, TCGv_i64, TCGv_i64))
4719 {
4720     TCGv_i64 src1, src2;
4721     TCGv dst;
4722 
4723     if (gen_trap_ifnofpu(dc)) {
4724         return true;
4725     }
4726 
4727     dst = gen_dest_gpr(dc, a->rd);
4728     src1 = gen_load_fpr_D(dc, a->rs1);
4729     src2 = gen_load_fpr_D(dc, a->rs2);
4730     func(dst, src1, src2);
4731     gen_store_gpr(dc, a->rd, dst);
4732     return advance_pc(dc);
4733 }
4734 
4735 TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
4736 TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
4737 TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
4738 TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
4739 
4740 TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
4741 TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
4742 TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
4743 TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4744 
4745 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4746                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4747 {
4748     TCGv_i64 dst, src1, src2;
4749 
4750     if (gen_trap_ifnofpu(dc)) {
4751         return true;
4752     }
4753 
4754     dst = gen_dest_fpr_D(dc, a->rd);
4755     src1 = gen_load_fpr_D(dc, a->rs1);
4756     src2 = gen_load_fpr_D(dc, a->rs2);
4757     func(dst, tcg_env, src1, src2);
4758     gen_store_fpr_D(dc, a->rd, dst);
4759     return advance_pc(dc);
4760 }
4761 
4762 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
4763 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
4764 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
4765 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4766 
4767 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4768 {
4769     TCGv_i64 dst;
4770     TCGv_i32 src1, src2;
4771 
4772     if (gen_trap_ifnofpu(dc)) {
4773         return true;
4774     }
4775     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4776         return raise_unimpfpop(dc);
4777     }
4778 
4779     dst = gen_dest_fpr_D(dc, a->rd);
4780     src1 = gen_load_fpr_F(dc, a->rs1);
4781     src2 = gen_load_fpr_F(dc, a->rs2);
4782     gen_helper_fsmuld(dst, tcg_env, src1, src2);
4783     gen_store_fpr_D(dc, a->rd, dst);
4784     return advance_pc(dc);
4785 }
4786 
4787 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
4788                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
4789 {
4790     TCGv_i64 dst, src0, src1, src2;
4791 
4792     if (gen_trap_ifnofpu(dc)) {
4793         return true;
4794     }
4795 
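         /* rd is both the accumulator input (src0) and the destination. */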
4796     dst  = gen_dest_fpr_D(dc, a->rd);
4797     src0 = gen_load_fpr_D(dc, a->rd);
4798     src1 = gen_load_fpr_D(dc, a->rs1);
4799     src2 = gen_load_fpr_D(dc, a->rs2);
4800     func(dst, src0, src1, src2);
4801     gen_store_fpr_D(dc, a->rd, dst);
4802     return advance_pc(dc);
4803 }
4804 
4805 TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4806 
4807 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
4808                        void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
4809 {
4810     TCGv_i128 src1, src2;
4811 
4812     if (gen_trap_ifnofpu(dc)) {
4813         return true;
4814     }
4815     if (gen_trap_float128(dc)) {
4816         return true;
4817     }
4818 
4819     src1 = gen_load_fpr_Q(dc, a->rs1);
4820     src2 = gen_load_fpr_Q(dc, a->rs2);
4821     func(src1, tcg_env, src1, src2);
4822     gen_store_fpr_Q(dc, a->rd, src1);
4823     return advance_pc(dc);
4824 }
4825 
4826 TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
4827 TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
4828 TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
4829 TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4830 
4831 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
4832 {
4833     TCGv_i64 src1, src2;
4834     TCGv_i128 dst;
4835 
4836     if (gen_trap_ifnofpu(dc)) {
4837         return true;
4838     }
4839     if (gen_trap_float128(dc)) {
4840         return true;
4841     }
4842 
4843     src1 = gen_load_fpr_D(dc, a->rs1);
4844     src2 = gen_load_fpr_D(dc, a->rs2);
4845     dst = tcg_temp_new_i128();
4846     gen_helper_fdmulq(dst, tcg_env, src1, src2);
4847     gen_store_fpr_Q(dc, a->rd, dst);
4848     return advance_pc(dc);
4849 }
4850 
4851 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
4852                      void (*func)(DisasContext *, DisasCompare *, int, int))
4853 {
4854     DisasCompare cmp;
4855 
4856     if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4857         return false;
4858     }
4859     if (gen_trap_ifnofpu(dc)) {
4860         return true;
4861     }
4862     if (is_128 && gen_trap_float128(dc)) {
4863         return true;
4864     }
4865 
4866     gen_op_clear_ieee_excp_and_FTT();
4867     func(dc, &cmp, a->rd, a->rs2);
4868     return advance_pc(dc);
4869 }
4870 
4871 TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
4872 TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
4873 TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
4874 
4875 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
4876                       void (*func)(DisasContext *, DisasCompare *, int, int))
4877 {
4878     DisasCompare cmp;
4879 
4880     if (gen_trap_ifnofpu(dc)) {
4881         return true;
4882     }
4883     if (is_128 && gen_trap_float128(dc)) {
4884         return true;
4885     }
4886 
4887     gen_op_clear_ieee_excp_and_FTT();
4888     gen_compare(&cmp, a->cc, a->cond, dc);
4889     func(dc, &cmp, a->rd, a->rs2);
4890     return advance_pc(dc);
4891 }
4892 
4893 TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
4894 TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
4895 TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
4896 
4897 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
4898                        void (*func)(DisasContext *, DisasCompare *, int, int))
4899 {
4900     DisasCompare cmp;
4901 
4902     if (gen_trap_ifnofpu(dc)) {
4903         return true;
4904     }
4905     if (is_128 && gen_trap_float128(dc)) {
4906         return true;
4907     }
4908 
4909     gen_op_clear_ieee_excp_and_FTT();
4910     gen_fcompare(&cmp, a->cc, a->cond);
4911     func(dc, &cmp, a->rd, a->rs2);
4912     return advance_pc(dc);
4913 }
4914 
4915 TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
4916 TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
4917 TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
4918 
4919 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
4920 {
4921     TCGv_i32 src1, src2;
4922 
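         /* Pre-v9 CPUs have only %fcc0. */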
4923     if (avail_32(dc) && a->cc != 0) {
4924         return false;
4925     }
4926     if (gen_trap_ifnofpu(dc)) {
4927         return true;
4928     }
4929 
4930     src1 = gen_load_fpr_F(dc, a->rs1);
4931     src2 = gen_load_fpr_F(dc, a->rs2);
4932     if (e) {
4933         gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
4934     } else {
4935         gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
4936     }
4937     return advance_pc(dc);
4938 }
4939 
4940 TRANS(FCMPs, ALL, do_fcmps, a, false)
4941 TRANS(FCMPEs, ALL, do_fcmps, a, true)
4942 
4943 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
4944 {
4945     TCGv_i64 src1, src2;
4946 
4947     if (avail_32(dc) && a->cc != 0) {
4948         return false;
4949     }
4950     if (gen_trap_ifnofpu(dc)) {
4951         return true;
4952     }
4953 
4954     src1 = gen_load_fpr_D(dc, a->rs1);
4955     src2 = gen_load_fpr_D(dc, a->rs2);
4956     if (e) {
4957         gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
4958     } else {
4959         gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
4960     }
4961     return advance_pc(dc);
4962 }
4963 
4964 TRANS(FCMPd, ALL, do_fcmpd, a, false)
4965 TRANS(FCMPEd, ALL, do_fcmpd, a, true)
4966 
4967 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
4968 {
4969     TCGv_i128 src1, src2;
4970 
4971     if (avail_32(dc) && a->cc != 0) {
4972         return false;
4973     }
4974     if (gen_trap_ifnofpu(dc)) {
4975         return true;
4976     }
4977     if (gen_trap_float128(dc)) {
4978         return true;
4979     }
4980 
4981     src1 = gen_load_fpr_Q(dc, a->rs1);
4982     src2 = gen_load_fpr_Q(dc, a->rs2);
4983     if (e) {
4984         gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
4985     } else {
4986         gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
4987     }
4988     return advance_pc(dc);
4989 }
4990 
4991 TRANS(FCMPq, ALL, do_fcmpq, a, false)
4992 TRANS(FCMPEq, ALL, do_fcmpq, a, true)
4993 
4994 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4995 {
4996     DisasContext *dc = container_of(dcbase, DisasContext, base);
4997     int bound;
4998 
4999     dc->pc = dc->base.pc_first;
5000     dc->npc = (target_ulong)dc->base.tb->cs_base;
5001     dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5002     dc->def = &cpu_env(cs)->def;
5003     dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5004     dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5005 #ifndef CONFIG_USER_ONLY
5006     dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5007 #endif
5008 #ifdef TARGET_SPARC64
5009     dc->fprs_dirty = 0;
5010     dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5011 #ifndef CONFIG_USER_ONLY
5012     dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5013 #endif
5014 #endif
5015     /*
5016      * if we reach a page boundary, we stop generation so that the
5017      * PC of a TT_TFAULT exception is always in the right page
5018      */
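         /*
          * TARGET_PAGE_MASK is negative, so -(pc_first | TARGET_PAGE_MASK)
          * is the number of bytes remaining in the page; each insn is 4
          * bytes long.
          */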
5019     bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5020     dc->base.max_insns = MIN(dc->base.max_insns, bound);
5021 }
5022 
5023 static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
5024 {
5025 }
5026 
5027 static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
5028 {
5029     DisasContext *dc = container_of(dcbase, DisasContext, base);
5030     target_ulong npc = dc->npc;
5031 
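         /*
          * Real npc values are 4-byte aligned; an npc with nonzero low
          * bits is one of the symbolic markers handled below.
          */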
5032     if (npc & 3) {
5033         switch (npc) {
5034         case JUMP_PC:
5035             assert(dc->jump_pc[1] == dc->pc + 4);
5036             npc = dc->jump_pc[0] | JUMP_PC;
5037             break;
5038         case DYNAMIC_PC:
5039         case DYNAMIC_PC_LOOKUP:
5040             npc = DYNAMIC_PC;
5041             break;
5042         default:
5043             g_assert_not_reached();
5044         }
5045     }
5046     tcg_gen_insn_start(dc->pc, npc);
5047 }
5048 
5049 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5050 {
5051     DisasContext *dc = container_of(dcbase, DisasContext, base);
5052     unsigned int insn;
5053 
5054     insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
5055     dc->base.pc_next += 4;
5056 
5057     if (!decode(dc, insn)) {
5058         gen_exception(dc, TT_ILL_INSN);
5059     }
5060 
5061     if (dc->base.is_jmp == DISAS_NORETURN) {
5062         return;
5063     }
5064     if (dc->pc != dc->base.pc_next) {
5065         dc->base.is_jmp = DISAS_TOO_MANY;
5066     }
5067 }
5068 
5069 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5070 {
5071     DisasContext *dc = container_of(dcbase, DisasContext, base);
5072     DisasDelayException *e, *e_next;
5073     bool may_lookup;
5074 
5075     finishing_insn(dc);
5076 
5077     switch (dc->base.is_jmp) {
5078     case DISAS_NEXT:
5079     case DISAS_TOO_MANY:
5080         if (((dc->pc | dc->npc) & 3) == 0) {
5081             /* static PC and NPC: we can use direct chaining */
5082             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5083             break;
5084         }
5085 
5086         may_lookup = true;
5087         if (dc->pc & 3) {
5088             switch (dc->pc) {
5089             case DYNAMIC_PC_LOOKUP:
5090                 break;
5091             case DYNAMIC_PC:
5092                 may_lookup = false;
5093                 break;
5094             default:
5095                 g_assert_not_reached();
5096             }
5097         } else {
5098             tcg_gen_movi_tl(cpu_pc, dc->pc);
5099         }
5100 
5101         if (dc->npc & 3) {
5102             switch (dc->npc) {
5103             case JUMP_PC:
5104                 gen_generic_branch(dc);
5105                 break;
5106             case DYNAMIC_PC:
5107                 may_lookup = false;
5108                 break;
5109             case DYNAMIC_PC_LOOKUP:
5110                 break;
5111             default:
5112                 g_assert_not_reached();
5113             }
5114         } else {
5115             tcg_gen_movi_tl(cpu_npc, dc->npc);
5116         }
5117         if (may_lookup) {
5118             tcg_gen_lookup_and_goto_ptr();
5119         } else {
5120             tcg_gen_exit_tb(NULL, 0);
5121         }
5122         break;
5123 
5124     case DISAS_NORETURN:
5125         break;
5126 
5127     case DISAS_EXIT:
5128         /* Exit TB */
5129         save_state(dc);
5130         tcg_gen_exit_tb(NULL, 0);
5131         break;
5132 
5133     default:
5134         g_assert_not_reached();
5135     }
5136 
5137     for (e = dc->delay_excp_list; e ; e = e_next) {
5138         gen_set_label(e->lab);
5139 
5140         tcg_gen_movi_tl(cpu_pc, e->pc);
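             /* An unaligned e->npc is a dynamic marker: cpu_npc already
              * holds the runtime value, so store only static values. */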
5141         if (e->npc % 4 == 0) {
5142             tcg_gen_movi_tl(cpu_npc, e->npc);
5143         }
5144         gen_helper_raise_exception(tcg_env, e->excp);
5145 
5146         e_next = e->next;
5147         g_free(e);
5148     }
5149 }
5150 
5151 static const TranslatorOps sparc_tr_ops = {
5152     .init_disas_context = sparc_tr_init_disas_context,
5153     .tb_start           = sparc_tr_tb_start,
5154     .insn_start         = sparc_tr_insn_start,
5155     .translate_insn     = sparc_tr_translate_insn,
5156     .tb_stop            = sparc_tr_tb_stop,
5157 };
5158 
5159 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
5160                            vaddr pc, void *host_pc)
5161 {
5162     DisasContext dc = {};
5163 
5164     translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
5165 }
5166 
5167 void sparc_tcg_init(void)
5168 {
5169     static const char gregnames[32][4] = {
5170         "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5171         "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5172         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5173         "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5174     };
5175     static const char fregnames[32][4] = {
5176         "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5177         "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5178         "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5179         "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5180     };
5181 
5182     static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
5183 #ifdef TARGET_SPARC64
5184         { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
5185         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
5186         { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
5187         { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
5188         { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
5189 #else
5190         { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
5191 #endif
5192     };
5193 
5194     static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
5195 #ifdef TARGET_SPARC64
5196         { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
5197         { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
5198         { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
5199 #endif
5200         { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
5201         { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
5202         { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
5203         { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
5204         { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
5205         { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
5206         { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
5207         { &cpu_y, offsetof(CPUSPARCState, y), "y" },
5208         { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
5209     };
5210 
5211     unsigned int i;
5212 
5213     cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
5214                                          offsetof(CPUSPARCState, regwptr),
5215                                          "regwptr");
5216 
5217     for (i = 0; i < ARRAY_SIZE(r32); ++i) {
5218         *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
5219     }
5220 
5221     for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
5222         *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
5223     }
5224 
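         /* %g0 is hardwired to zero and gets no backing global. */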
5225     cpu_regs[0] = NULL;
5226     for (i = 1; i < 8; ++i) {
5227         cpu_regs[i] = tcg_global_mem_new(tcg_env,
5228                                          offsetof(CPUSPARCState, gregs[i]),
5229                                          gregnames[i]);
5230     }
5231 
5232     for (i = 8; i < 32; ++i) {
5233         cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
5234                                          (i - 8) * sizeof(target_ulong),
5235                                          gregnames[i]);
5236     }
5237 
5238     for (i = 0; i < TARGET_DPREGS; i++) {
5239         cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
5240                                             offsetof(CPUSPARCState, fpr[i]),
5241                                             fregnames[i]);
5242     }
5243 }
5244 
5245 void sparc_restore_state_to_opc(CPUState *cs,
5246                                 const TranslationBlock *tb,
5247                                 const uint64_t *data)
5248 {
5249     CPUSPARCState *env = cpu_env(cs);
5250     target_ulong pc = data[0];
5251     target_ulong npc = data[1];
5252 
5253     env->pc = pc;
5254     if (npc == DYNAMIC_PC) {
5255         /* dynamic NPC: already stored */
5256     } else if (npc & JUMP_PC) {
5257         /* jump PC: use 'cond' and the jump targets of the translation */
5258         if (env->cond) {
5259             env->npc = npc & ~3;
5260         } else {
5261             env->npc = pc + 4;
5262         }
5263     } else {
5264         env->npc = npc;
5265     }
5266 }
5267