xref: /openbmc/qemu/target/sparc/translate.c (revision 5e6aceb2)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
38 #ifdef TARGET_SPARC64
39 # define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
40 # define gen_helper_rett(E)                     qemu_build_not_reached()
41 # define gen_helper_power_down(E)               qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
43 #else
44 # define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
45 # define gen_helper_done(E)                     qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
47 # define gen_helper_flushw(E)                   qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
51 # define gen_helper_restored(E)                 qemu_build_not_reached()
52 # define gen_helper_retry(E)                    qemu_build_not_reached()
53 # define gen_helper_saved(E)                    qemu_build_not_reached()
54 # define gen_helper_set_softint(E, S)           qemu_build_not_reached()
55 # define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
56 # define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
57 # define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
58 # define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
59 # define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
60 # define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
61 # define gen_helper_write_softint(E, S)         qemu_build_not_reached()
62 # define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
63 # define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
64 # define gen_helper_fabsq                ({ qemu_build_not_reached(); NULL; })
65 # define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
66 # define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fnegq                ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
90 # define FSR_LDXFSR_MASK                        0
91 # define FSR_LDXFSR_OLDMASK                     0
92 # define MAXTL_MASK                             0
93 #endif
94 
95 /* Dynamic PC, must exit to main loop. */
96 #define DYNAMIC_PC         1
97 /* Dynamic PC, one of two values according to jump_pc[T2]. */
98 #define JUMP_PC            2
99 /* Dynamic PC, may lookup next TB. */
100 #define DYNAMIC_PC_LOOKUP  3
101 
102 #define DISAS_EXIT  DISAS_TARGET_0
103 
104 /* global register indexes */
105 static TCGv_ptr cpu_regwptr;
106 static TCGv cpu_fsr, cpu_pc, cpu_npc;
107 static TCGv cpu_regs[32];
108 static TCGv cpu_y;
109 static TCGv cpu_tbr;
110 static TCGv cpu_cond;
111 static TCGv cpu_cc_N;
112 static TCGv cpu_cc_V;
113 static TCGv cpu_icc_Z;
114 static TCGv cpu_icc_C;
115 #ifdef TARGET_SPARC64
116 static TCGv cpu_xcc_Z;
117 static TCGv cpu_xcc_C;
118 static TCGv_i32 cpu_fprs;
119 static TCGv cpu_gsr;
120 #else
121 # define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
122 # define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
123 #endif
124 
125 #ifdef TARGET_SPARC64
126 #define cpu_cc_Z  cpu_xcc_Z
127 #define cpu_cc_C  cpu_xcc_C
128 #else
129 #define cpu_cc_Z  cpu_icc_Z
130 #define cpu_cc_C  cpu_icc_C
131 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
132 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
133 #endif
134 
135 /* Floating point registers */
136 static TCGv_i64 cpu_fpr[TARGET_DPREGS];
137 
138 #define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
139 #ifdef TARGET_SPARC64
140 # define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
141 # define env64_field_offsetof(X)  env_field_offsetof(X)
142 #else
143 # define env32_field_offsetof(X)  env_field_offsetof(X)
144 # define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
145 #endif
146 
147 typedef struct DisasCompare {
148     TCGCond cond;
149     TCGv c1;
150     int c2;
151 } DisasCompare;
152 
153 typedef struct DisasDelayException {
154     struct DisasDelayException *next;
155     TCGLabel *lab;
156     TCGv_i32 excp;
157     /* Saved state at parent insn. */
158     target_ulong pc;
159     target_ulong npc;
160 } DisasDelayException;
161 
162 typedef struct DisasContext {
163     DisasContextBase base;
164     target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
165     target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
166 
167     /* Used when JUMP_PC value is used. */
168     DisasCompare jump;
169     target_ulong jump_pc[2];
170 
171     int mem_idx;
172     bool cpu_cond_live;
173     bool fpu_enabled;
174     bool address_mask_32bit;
175 #ifndef CONFIG_USER_ONLY
176     bool supervisor;
177 #ifdef TARGET_SPARC64
178     bool hypervisor;
179 #endif
180 #endif
181 
182     sparc_def_t *def;
183 #ifdef TARGET_SPARC64
184     int fprs_dirty;
185     int asi;
186 #endif
187     DisasDelayException *delay_excp_list;
188 } DisasContext;
189 
190 // This function uses non-native bit order
191 #define GET_FIELD(X, FROM, TO)                                  \
192     ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
193 
194 // This function uses the order in the manuals, i.e. bit 0 is 2^0
195 #define GET_FIELD_SP(X, FROM, TO)               \
196     GET_FIELD(X, 31 - (TO), 31 - (FROM))
197 
198 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
199 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
200 
201 #ifdef TARGET_SPARC64
202 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
203 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
204 #else
205 #define DFPREG(r) (r & 0x1e)
206 #define QFPREG(r) (r & 0x1c)
207 #endif
208 
209 #define UA2005_HTRAP_MASK 0xff
210 #define V8_TRAP_MASK 0x7f
211 
212 #define IS_IMM (insn & (1<<13))
213 
/* Mark the FPRS dirty bit for register RD (sparc64 only; no-op on sparc32). */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    /* FPRS.DL (bit 0) covers %f0-%f31; FPRS.DU (bit 1) covers %f32-%f63. */
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
226 
227 /* floating point registers moves */
/*
 * Load single-precision register %f<src> into a fresh i32 temp.
 * Two singles are packed per i64 slot: the even-numbered register
 * occupies the high 32 bits, the odd-numbered one the low 32 bits.
 */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    if (src & 1) {
        tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    } else {
        tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
    }
    return ret;
}
238 
/* Store V into single-precision %f<dst>, leaving its pair register intact. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    /* Odd registers live in the low half, even registers in the high half. */
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
}
248 
/* Return a scratch i32 to receive a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
253 
254 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
255 {
256     src = DFPREG(src);
257     return cpu_fpr[src / 2];
258 }
259 
/* Store V into double-precision %f<dst> and mark the FPRS bank dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    dst = DFPREG(dst);
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);
}
266 
/* Destination for a double result: the backing register itself. */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
271 
/* Copy the quad register pair starting at %f<src> into env scratch qt0. */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
279 
/* Copy the quad register pair starting at %f<src> into env scratch qt1. */
static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}
287 
/* Copy env scratch qt0 back into the quad register pair at %f<dst>. */
static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
295 
296 /* moves */
297 #ifdef CONFIG_USER_ONLY
298 #define supervisor(dc) 0
299 #define hypervisor(dc) 0
300 #else
301 #ifdef TARGET_SPARC64
302 #define hypervisor(dc) (dc->hypervisor)
303 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
304 #else
305 #define supervisor(dc) (dc->supervisor)
306 #define hypervisor(dc) 0
307 #endif
308 #endif
309 
310 #if !defined(TARGET_SPARC64)
311 # define AM_CHECK(dc)  false
312 #elif defined(TARGET_ABI32)
313 # define AM_CHECK(dc)  true
314 #elif defined(CONFIG_USER_ONLY)
315 # define AM_CHECK(dc)  false
316 #else
317 # define AM_CHECK(dc)  ((dc)->address_mask_32bit)
318 #endif
319 
/* In 32-bit address-mask mode, truncate a generated address to 32 bits. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}
326 
327 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
328 {
329     return AM_CHECK(dc) ? (uint32_t)addr : addr;
330 }
331 
332 static TCGv gen_load_gpr(DisasContext *dc, int reg)
333 {
334     if (reg > 0) {
335         assert(reg < 32);
336         return cpu_regs[reg];
337     } else {
338         TCGv t = tcg_temp_new();
339         tcg_gen_movi_tl(t, 0);
340         return t;
341     }
342 }
343 
344 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
345 {
346     if (reg > 0) {
347         assert(reg < 32);
348         tcg_gen_mov_tl(cpu_regs[reg], v);
349     }
350 }
351 
352 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
353 {
354     if (reg > 0) {
355         assert(reg < 32);
356         return cpu_regs[reg];
357     } else {
358         return tcg_temp_new();
359     }
360 }
361 
362 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
363 {
364     return translator_use_goto_tb(&s->base, pc) &&
365            translator_use_goto_tb(&s->base, npc);
366 }
367 
/*
 * Emit the end-of-TB transfer to (pc, npc): chain directly to the next
 * TB when allowed, otherwise fall back to a lookup-and-jump.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
384 
/*
 * Return the 32-bit carry flag (icc.C) as a 0/1 value.  On a 64-bit
 * target the flag is kept in bit 32 of cpu_icc_C and must be extracted.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
394 
/*
 * Add with condition codes: dst = src1 + src2 (+ cin when non-NULL).
 * Leaves N/Z/V/C in the cpu_cc_* globals; on a 64-bit target the
 * 32-bit Z and C are additionally tracked in cpu_icc_Z / cpu_icc_C.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* Overflow: (result ^ src2) & ~(src1 ^ src2), using Z as scratch. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    /* Z holds the result; Z-flag consumers test it against zero. */
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
419 
/* ADDcc: add without carry-in, setting condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}
424 
/* TADDcc: tagged add; a set tag bit (bits 0-1) in either operand forces icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
440 
/* ADDC: add with the 32-bit carry flag, without setting condition codes. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}
446 
/* ADDCcc: add with carry-in, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}
451 
/*
 * Subtract with condition codes: dst = src1 - src2 (- cin when non-NULL).
 * Mirrors gen_op_addcc_int; sub2 yields a borrow that is negated into C.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* Overflow: (result ^ src1) & (src1 ^ src2), using Z as scratch. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Borrow-in to bit 32 is result ^ src1 ^ src2, already in Z ^ N. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
473 
/* SUBcc: subtract without borrow-in, setting condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}
478 
/* TSUBcc: tagged subtract; tag bits in either operand force icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}
494 
/* SUBC: subtract with the 32-bit carry flag, without setting condition codes. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}
500 
/* SUBCcc: subtract with borrow-in, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}
505 
/*
 * MULScc: one step of the V8 multiply-step algorithm.  Shifts %y and
 * src1 right by one, conditionally adds src2, and sets condition codes.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_andi_tl(t0, cpu_y, 0x1);
    tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
542 
/*
 * 32x32 -> 64-bit multiply.  The high 32 bits of the product go to %y;
 * dst receives the low word (or the whole product on a 64-bit target).
 * sign_ext selects signed vs unsigned extension of the operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
567 
/* UMUL: unsigned 32x32 multiply, high word to %y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}
573 
/* SMUL: signed 32x32 multiply, high word to %y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
579 
/*
 * SDIV: signed 32-bit divide via helper (which may raise division-by-zero).
 * The helper returns a 64-bit value; only the low 32 bits are the quotient,
 * which is sign-extended into dst.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
591 
/*
 * UDIVcc: unsigned divide, setting condition codes.  The helper packs
 * the quotient in the low half and the V flag in the high half of t64.
 * C is always cleared; Z/N follow the quotient.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
616 
/*
 * SDIVcc: signed divide, setting condition codes.  Same layout as
 * gen_op_udivcc but the quotient is sign-extended for N.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
641 
/* TADDccTV: tagged add that traps on tag overflow (helper may raise). */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}
646 
/* TSUBccTV: tagged subtract that traps on tag overflow (helper may raise). */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}
651 
/* POPC: population count of src2; src1 is unused (the insn encodes rs2 only). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
656 
#ifndef TARGET_SPARC64
/* Stub: the VIS ARRAY8 helper only exists on sparc64 builds. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
663 
/* ARRAY16: ARRAY8 address scaled by 2 for 16-bit elements. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}
669 
/* ARRAY32: ARRAY8 address scaled by 4 for 32-bit elements. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
675 
/* VIS FPACK16 (sparc64 only); scale factor comes from %gsr. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
684 
/* VIS FPACKFIX (sparc64 only); scale factor comes from %gsr. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}
693 
/* VIS FPACK32 (sparc64 only); scale factor comes from %gsr. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
702 
/*
 * VIS FALIGNDATA: extract 8 bytes from the 16-byte concatenation s1:s2,
 * starting at the byte offset held in GSR.align (low 3 bits of %gsr).
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* shift = GSR.align * 8 bits */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
729 
/* VIS BSHUFFLE (sparc64 only); byte selector comes from %gsr. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
738 
// Branch always: condition evaluates to 1.
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}
744 
// Branch never: condition evaluates to 0.
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
750 
751 /*
752   FPSR bit field FCC1 | FCC0:
753    0 =
754    1 <
755    2 >
756    3 unordered
757 */
/* Extract bit FCC0 of the selected fcc field from the FSR into reg (0/1). */
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
764 
/* Extract bit FCC1 of the selected fcc field from the FSR into reg (0/1). */
static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}
770 
// FBNE (not equal), fcc != 0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}
779 
// FBLG (less or greater), fcc == 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}
788 
// FBUL (unordered or less), fcc == 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}
794 
// FBL (less), fcc == 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}
803 
// FBUG (unordered or greater), fcc == 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}
809 
// FBG (greater), fcc == 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}
818 
// FBU (unordered), fcc == 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}
827 
// FBE (equal), fcc == 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
837 
// FBUE (unordered or equal), fcc == 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
847 
// FBGE (greater or equal), fcc == 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
854 
// FBUGE (unordered, greater or equal), fcc != 1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
864 
// FBLE (less or equal), fcc == 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
871 
// FBULE (unordered, less or equal), fcc != 2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
881 
// FBO (ordered), fcc != 3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
891 
/* Called once the current insn can no longer raise an unwinding exception. */
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}
904 
/* Resolve a JUMP_PC npc: select jump_pc[0] or jump_pc[1] into cpu_npc. */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
913 
/*
 * Call this function before using the condition register as it may
 * have been set for a jump.  Resolves a pending JUMP_PC into cpu_npc.
 */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}
923 
/*
 * Flush dc->npc into the cpu_npc global.  The symbolic encodings
 * (low bits non-zero) mean cpu_npc is already, or will now be, valid.
 */
static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Materialize the pending two-way branch target. */
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}
942 
/* Flush the translator's (pc, npc) into the CPU state globals. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
948 
/* Raise exception WHICH at the current insn and terminate the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
956 
957 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
958 {
959     DisasDelayException *e = g_new0(DisasDelayException, 1);
960 
961     e->next = dc->delay_excp_list;
962     dc->delay_excp_list = e;
963 
964     e->lab = gen_new_label();
965     e->excp = excp;
966     e->pc = dc->pc;
967     /* Caller must have used flush_cond before branch. */
968     assert(e->npc != JUMP_PC);
969     e->npc = dc->npc;
970 
971     return e->lab;
972 }
973 
/* Convenience wrapper: delayed exception with a constant exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
978 
/* Branch to a delayed TT_UNALIGNED trap if any MASK bits of ADDR are set. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    /* delay_exception requires npc to be resolved. */
    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}
990 
/*
 * Advance pc to npc, handling the symbolic npc encodings by copying
 * cpu_npc into cpu_pc.  Used by insns that replace npc (branches, JMPL).
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
1014 
/*
 * Compute integer condition COND (0..15) over the icc or xcc flags
 * (selected by XCC) into *CMP, expressed as a comparison of c1
 * against the constant c2 == 0.  Bit 3 of COND selects the negated
 * form of the base condition in bits 0..2.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For icc, the carry is kept in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    /* Bit 3 of COND selects the complementary condition. */
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1113 
/*
 * Compute floating-point condition COND (0..15) over fcc[CC] into
 * *CMP, as a boolean result tested with NE against 0.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = 0;

    /* Bit offset of fcc[cc] within FSR, passed to the eval helpers.
       NOTE(review): 32/34/36 appear to be the fcc1..fcc3 positions
       relative to fcc0 at bit 10 -- confirm against the FSR layout.  */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1191 
1192 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1193 {
1194     static const TCGCond cond_reg[4] = {
1195         TCG_COND_NEVER,  /* reserved */
1196         TCG_COND_EQ,
1197         TCG_COND_LE,
1198         TCG_COND_LT,
1199     };
1200     TCGCond tcond;
1201 
1202     if ((cond & 3) == 0) {
1203         return false;
1204     }
1205     tcond = cond_reg[cond & 3];
1206     if (cond & 4) {
1207         tcond = tcg_invert_cond(tcond);
1208     }
1209 
1210     cmp->cond = tcond;
1211     cmp->c1 = tcg_temp_new();
1212     cmp->c2 = 0;
1213     tcg_gen_mov_tl(cmp->c1, r_src);
1214     return true;
1215 }
1216 
/* Clear the FSR ftt and current-exception (cexc) fields via the mask. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1221 
/* FMOVs: single-precision register move; clears FSR ftt/cexc first. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1227 
/* FNEGs: single-precision negate; clears FSR ftt/cexc first. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}
1233 
/* FABSs: single-precision absolute value; clears FSR ftt/cexc first. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}
1239 
/* FMOVd: double-precision register move; clears FSR ftt/cexc first. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1245 
/* FNEGd: double-precision negate (sparc64 only); clears ftt/cexc first. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}
1251 
/* FABSd: double-precision absolute value; clears ftt/cexc first. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1257 
1258 #ifdef TARGET_SPARC64
/* FCMPs: single-precision compare, writing fcc[FCCNO] via the helper. */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1276 
/* FCMPd: double-precision compare, writing fcc[FCCNO] via the helper. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1294 
/* FCMPq: quad-precision compare; operands come implicitly from env. */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1312 
/* FCMPEs: single-precision compare, signaling on unordered operands. */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1330 
/* FCMPEd: double-precision compare, signaling on unordered operands. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1348 
/* FCMPEq: quad-precision signaling compare; operands implicit in env. */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1366 
1367 #else
1368 
/* Pre-v9 variant: only fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1373 
/* Pre-v9 variant: only fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1378 
/* Pre-v9 variant: only fcc0 exists; quad operands are implicit in env. */
static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}
1383 
/* Pre-v9 signaling compare; FCCNO is ignored (fcc0 only). */
static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1388 
/* Pre-v9 signaling compare; FCCNO is ignored (fcc0 only). */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1393 
/* Pre-v9 signaling quad compare; FCCNO ignored, operands from env. */
static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
1398 #endif
1399 
/*
 * Replace the FSR ftt field with FSR_FLAGS and raise a TT_FP_EXCP
 * trap at the current insn.
 */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1406 
/*
 * Raise TT_NFPU_INSN if the FPU is disabled (system emulation only;
 * user mode never takes this path).  Returns nonzero when the trap
 * was generated, so the caller should abandon the insn.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1417 
/* asi moves */
typedef enum {
    GET_ASI_HELPER,  /* fall back to the out-of-line ld_asi/st_asi helpers */
    GET_ASI_EXCP,    /* an exception was raised while decoding; emit nothing */
    GET_ASI_DIRECT,  /* plain guest-memory access through mem_idx */
    GET_ASI_DTWINX,  /* twin-register (128-bit) load/store */
    GET_ASI_BLOCK,   /* 64-byte FP block transfer */
    GET_ASI_SHORT,   /* 8/16-bit "short" floating-point access */
    GET_ASI_BCOPY,   /* sparc32 block copy (sta) */
    GET_ASI_BFILL,   /* sparc32 block fill (stda) */
} ASIType;
1429 
/* Fully decoded description of one asi-qualified memory access. */
typedef struct {
    ASIType type;   /* chosen access strategy */
    int asi;        /* resolved asi number */
    int mem_idx;    /* MMU index for the access */
    MemOp memop;    /* size/sign/endianness, possibly overridden by the asi */
} DisasASI;
1436 
1437 /*
1438  * Build DisasASI.
1439  * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1441  */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect.  */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:   /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA: /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* No immediate asi: use the current %asi register value. */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First pass: map the asi to an MMU index. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            break;
        }
        /* Second pass: choose the access strategy (and for the
           "short" asis, override the operand size). */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set.  */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1660 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs so the helper-based paths below still compile for sparc32
 * user mode.  They should be unreachable: on sparc32, resolve_asi
 * raises an exception for non-supervisor asi accesses before any
 * helper call is emitted.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1674 
/*
 * Integer load from [ADDR] into DST according to the decoded asi DA.
 * Direct accesses are emitted inline; everything else goes through
 * the ld_asi helper (which needs pc/npc saved for possible faults).
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow to the 32-bit reg. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1705 
/*
 * Integer store of SRC to [ADDR] according to the decoded asi DA.
 * Handles the direct, twinx-reserved and sparc32 block-copy forms
 * inline; other asis are deferred to the st_asi helper.
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes a 64-bit value; widen the 32-bit reg. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1777 
1778 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1779                          TCGv dst, TCGv src, TCGv addr)
1780 {
1781     switch (da->type) {
1782     case GET_ASI_EXCP:
1783         break;
1784     case GET_ASI_DIRECT:
1785         tcg_gen_atomic_xchg_tl(dst, addr, src,
1786                                da->mem_idx, da->memop | MO_ALIGN);
1787         break;
1788     default:
1789         /* ??? Should be DAE_invalid_asi.  */
1790         gen_exception(dc, TT_DATA_ACCESS);
1791         break;
1792     }
1793 }
1794 
1795 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1796                         TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1797 {
1798     switch (da->type) {
1799     case GET_ASI_EXCP:
1800         return;
1801     case GET_ASI_DIRECT:
1802         tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1803                                   da->mem_idx, da->memop | MO_ALIGN);
1804         break;
1805     default:
1806         /* ??? Should be DAE_invalid_asi.  */
1807         gen_exception(dc, TT_DATA_ACCESS);
1808         break;
1809     }
1810 }
1811 
/*
 * LDSTUB(A): atomically load the byte at [ADDR] into DST and store
 * 0xff to it.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Non-atomic helper sequence is unsafe under MTTCG; punt. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1846 
/*
 * Floating-point load via asi DA into FP register RD.  ORIG_SIZE is
 * the operand size of the original insn (MO_32/MO_64/MO_128 for
 * ldfa/lddfa/ldqfa); da->memop may differ when the asi overrides it
 * (e.g. the 8/16-bit "short" asis).
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load into a temp first so a fault on the second half
               leaves cpu_fpr[rd/2] unmodified. */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
1958 
/*
 * Generate code for a floating-point store alternate (STFA/STDFA/STQFA):
 * store F/D/Q register RD to ADDR in the address space described by DA.
 * ORIG_SIZE is the size requested by the insn, before any splitting.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* A quad store is emitted as two 64-bit stores below. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        break;

    case GET_ASI_DIRECT:
        /*
         * NOTE(review): MO_ALIGN_4 set here is OR-combined with the
         * per-size alignment constants below; the MemOp alignment field
         * is an encoded value, not a bitmask per size — confirm the
         * combined values (e.g. MO_ALIGN_4 | MO_ALIGN_16) encode the
         * alignment the comments claim.
         */
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            /* Store eight consecutive 64-bit fp registers, 8 bytes apart. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2041 
/*
 * Generate code for LDDA: load a 64-bit doubleword (or a 128-bit "twin"
 * for the DTWINX asis) from ADDR into the even/odd register pair RD/RD+1.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Out-of-line helper; PC state must be synced first. */
            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2117 
/*
 * Generate code for STDA: store the even/odd register pair RD/RD+1 to
 * ADDR as one doubleword (or 128-bit twin for DTWINX) in the address
 * space described by DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* ASI resolution already raised an exception; emit nothing. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            /* Replicate the rd/rd+1 pair into four doubleword stores. */
            tcg_gen_concat_tl_i64(t64, lo, hi);
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            /* Out-of-line helper; PC state must be synced first. */
            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2209 
2210 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2211 {
2212 #ifdef TARGET_SPARC64
2213     TCGv_i32 c32, zero, dst, s1, s2;
2214     TCGv_i64 c64 = tcg_temp_new_i64();
2215 
2216     /* We have two choices here: extend the 32 bit data and use movcond_i64,
2217        or fold the comparison down to 32 bits and use movcond_i32.  Choose
2218        the later.  */
2219     c32 = tcg_temp_new_i32();
2220     tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2221     tcg_gen_extrl_i64_i32(c32, c64);
2222 
2223     s1 = gen_load_fpr_F(dc, rs);
2224     s2 = gen_load_fpr_F(dc, rd);
2225     dst = gen_dest_fpr_F(dc);
2226     zero = tcg_constant_i32(0);
2227 
2228     tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2229 
2230     gen_store_fpr_F(dc, rd, dst);
2231 #else
2232     qemu_build_not_reached();
2233 #endif
2234 }
2235 
2236 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2237 {
2238 #ifdef TARGET_SPARC64
2239     TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
2240     tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2241                         gen_load_fpr_D(dc, rs),
2242                         gen_load_fpr_D(dc, rd));
2243     gen_store_fpr_D(dc, rd, dst);
2244 #else
2245     qemu_build_not_reached();
2246 #endif
2247 }
2248 
2249 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2250 {
2251 #ifdef TARGET_SPARC64
2252     int qd = QFPREG(rd);
2253     int qs = QFPREG(rs);
2254     TCGv c2 = tcg_constant_tl(cmp->c2);
2255 
2256     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, c2,
2257                         cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
2258     tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, c2,
2259                         cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
2260 
2261     gen_update_fprs_dirty(dc, qd);
2262 #else
2263     qemu_build_not_reached();
2264 #endif
2265 }
2266 
#ifdef TARGET_SPARC64
/*
 * Set R_TSPTR to point at the trap_state for the current trap level:
 * tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2290 
/* Decodetree field transform: map insn register field to D-fp regno. */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

/* Decodetree field transform: map insn register field to Q-fp regno. */
static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2300 
2301 /* Include the auto-generated decoder.  */
2302 #include "decode-insns.c.inc"
2303 
/*
 * TRANS expands each decodetree trans_* entry point into a feature
 * availability check followed by a call to the shared implementation.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/*
 * Availability predicates used by TRANS.  Conditions that are fixed by
 * the build (sparc32 vs sparc64) fold to compile-time constants; the
 * rest consult the per-cpu feature bits.
 */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2334 
/* Default case for non jump instructions.  Advance the (pc, npc) pair,
   resolving any pending symbolic npc state left by an earlier branch. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* Low bits set: npc holds a symbolic marker, not an address. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc known only at runtime: pc <- npc, npc <- npc + 4. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: step the pair forward. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2375 
2376 /*
2377  * Major opcodes 00 and 01 -- branches, call, and sethi
2378  */
2379 
/*
 * Finish a conditional branch insn: target is pc + disp*4, with ANNUL
 * controlling whether the delay slot is executed on the untaken path.
 * Resolves statically where possible; otherwise records the pending
 * condition in dc->jump for advance_pc() to materialize.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul skips the delay slot entirely. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* npc is symbolic (see advance_pc); compute at runtime. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /*
         * Annulled conditional branch: taken path falls through to
         * (npc, dest); untaken path skips the delay slot (npc + 4).
         */
        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* pc <- npc; npc <- cond ? dest : npc + 4 (at runtime). */
                tcg_gen_mov_tl(cpu_pc, cpu_npc)__REMOVE__;
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: remember both successors in dc. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2459 
/* Raise a privileged-instruction trap; the insn is fully handled. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise fp_exception with FTT = unimplemented-FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2471 
2472 static bool gen_trap_float128(DisasContext *dc)
2473 {
2474     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2475         return false;
2476     }
2477     return raise_unimpfpop(dc);
2478 }
2479 
/* Integer condition-code branch (Bicc / BPcc). */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)

/* Floating-point condition-code branch (FBfcc / FBPfcc). */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FPU disabled => fp_disabled trap instead of the branch. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2504 
/* BPr: branch on the contents of an integer register (64-bit only). */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Reserved cond encodings decode as illegal instruction. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2517 
/* CALL: save the return address (this insn's PC) in r15 (%o7) and
   branch pc-relative; the delay slot executes before the target. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2527 
/* Coprocessor (CPop) insns; no coprocessor is implemented. */
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2541 
2542 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2543 {
2544     /* Special-case %g0 because that's the canonical nop.  */
2545     if (a->rd) {
2546         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2547     }
2548     return advance_pc(dc);
2549 }
2550 
2551 /*
2552  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2553  */
2554 
/*
 * Tcc: conditional software trap.  The trap number is rs1 + (imm or
 * rs2), masked, plus the TT_TRAP base.  COND 0 never traps, COND 8
 * always traps, anything else traps conditionally on the CC flags.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* HYPV-capable cpus in supervisor mode use the wider UA2005 mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to an out-of-line exception raise when the cond holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2607 
/* Tcc, register form.  On sparc32 the cc field must be zero. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc, v7/v8 immediate form (no cc field; sparc32 only). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc, v9 immediate form (with cc field; sparc64 only). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2631 
/* STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* MEMBAR (v9 only): barrier with separate ordering and completion masks. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2653 
/*
 * Common path for reading a special register into rd: check the
 * privilege predicate, let FUNC produce the value (either into the
 * supplied destination or by returning another TCGv), store it,
 * and advance the pc.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2663 
/* RDY: return the Y register global directly; do_rd_special stores it. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Leon3 %asr17: synthesize the configuration register at translate time. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8]   V8   */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* RDCCR: condition codes are kept lazily; a helper reconstructs them. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2705 
/* RDASI: %asi is tracked at translate time, so it is a constant here. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* RDTICK: read the tick timer via helper; interacts with icount. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* If icount is enabled, end the TB after this I/O-style read. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc is known at translate time (address-masked). */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2739 
/* RDFPRS: sign-extend the 32-bit FPRS global to target width. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: requires a usable FPU; the GSR lives in a dedicated global. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: read env->softint (supervisor only, per TRANS below). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* RDTICK_CMPR: read env->tick_cmpr. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2772 
/* RDSTICK: read the system tick timer via helper (see do_rdtick). */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    /* If icount is enabled, end the TB after this I/O-style read. */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: read env->stick_cmpr. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* RDPSR (sparc32): the PSR is reassembled from lazy state by helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2817 
/* Hyperprivileged register reads (RDHPR); all gated on hypervisor(dc). */

static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* RDHPR %htstate: index env->htstate[tl & MAXTL_MASK] (8-byte stride). */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
2875 
/* RDWIM (sparc32): read the window invalid mask from env->wim. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* RDPR %tpc: read tpc from the trap state at the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
2898 
/* RDPR %tnpc: read tnpc from the current trap level's trap state. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: read tstate from the current trap level's trap state. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

/* RDPR %tt: read the trap type from the current trap level. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
2944 
/* %tba lives in a TCG global; hand it back directly, ignoring dst.  */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* Read %pstate (32-bit field, sign-extended into a target reg).  */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* Read the current trap level %tl.  */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read %pil; note this field is shared with sparc32 (env_field_...).  */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
2976 
/* Read %cwp via helper (the helper normalizes the window index).  */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* The following are simple 32-bit loads of window management state.  */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* Read the global-register level %gl (UA2005).  */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read the (constant at runtime) %ver register.  */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3050 
3051 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3052 {
3053     if (avail_64(dc)) {
3054         gen_helper_flushw(tcg_env);
3055         return advance_pc(dc);
3056     }
3057     return false;
3058 }
3059 
/*
 * Common decode for the WR* ancillary/privileged register writes.
 * Per the architecture, the written value is rs1 XOR (rs2 or simm13);
 * FUNC receives the computed source value.  PRIV is the pre-evaluated
 * privilege predicate for this register.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* rs1 == %g0: the xor degenerates to the second operand alone.  */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: rs1 passes through unchanged.  */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3091 
/* %y is architecturally 32 bits; zero-extend on write.  */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* Write %asi (low 8 bits only), then exit the TB so that subsequent
   memory insns are translated with the new default ASI.  */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* Write %fprs; invalidate the cached dirty state and exit the TB so
   that FPU-enable checks are re-evaluated.  */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* Write %gsr; traps first if the FPU is disabled.  */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3159 
/* Write %tick_cmpr: store the raw value and reprogram the tick timer
   limit.  The TB must end so a pending timer interrupt is noticed.  */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* Write %stick: set the system tick counter itself.  */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* Write %stick_cmpr: as do_wrtick_cmpr, but for the system tick.  */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3204 
/* LEON3 power-down: the helper halts the cpu, so pc/npc must be
   synced to env before it runs.  The written value is ignored.  */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* Write %psr (sparc32): helper validates the value; exit the TB since
   this can change CWP, cc state and interrupt level.  */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* Write %wim (sparc32), masked to the implemented window count.  */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3232 
/* Write %tpc in the trap_state entry for the current trap level.  */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* Write %tnpc in the current trap_state entry.  */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* Write %tstate in the current trap_state entry.  */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* Write %tt (32-bit store) in the current trap_state entry.  */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3288 
/* Write %tick: set the tick counter; end the TB for timer irqs.  */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/* Write %pstate via helper.  pc/npc are synced first since the helper
   can change privilege/endianness; npc becomes dynamic because the
   remainder of the TB may no longer be valid.  */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* Write %tl; state is saved and npc forced dynamic, as changing the
   trap level changes which trap_state entry is current.  */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* Write %pil; helper may unmask pending interrupts.  */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3346 
/* The following are simple 32-bit stores of window management state.  */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* Write %gl via helper (which remaps the global register set).  */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* Sparc32 WRTBR shares the sparc64 %tba writer above.  */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3398 
/* Write %hpstate; exit the TB since hypervisor state affects
   translation-time decisions.  */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

/* Write %htstate[tl]: compute &env->htstate[tl] at runtime.  The
   index is masked by MAXTL_MASK and scaled by 8 (shift of 3), which
   presumably matches sizeof(env->htstate[0]) -- see cpu.h.  */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* Write %hstick_cmpr: store and reprogram the hstick timer limit.  */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3451 
/* SAVED/RESTORED: adjust the window bookkeeping via helper.
   SAVED selects gen_helper_saved, otherwise gen_helper_restored.  */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3467 
/* NOP: nothing to emit; just step pc/npc forward.  */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3479 
/*
 * Common translation for the register/register-or-immediate ALU forms.
 * FUNC is the reg-reg generator; FUNCI, if non-NULL, is the immediate
 * variant.  When LOGIC_CC is set, the result is computed directly into
 * cpu_cc_N and the remaining flags are derived from it (N == result,
 * Z mirrors N, C == V == 0), which matches the cc behavior of the
 * logical instructions.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute the result straight into the N flag register. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* The 32-bit icc view also reflects the (zero-C) result. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3522 
3523 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3524                      void (*func)(TCGv, TCGv, TCGv),
3525                      void (*funci)(TCGv, TCGv, target_long),
3526                      void (*func_cc)(TCGv, TCGv, TCGv))
3527 {
3528     if (a->cc) {
3529         return do_arith_int(dc, a, func_cc, NULL, false);
3530     }
3531     return do_arith_int(dc, a, func, funci, false);
3532 }
3533 
/* Logical-op dispatch: the cc bit selects the logic-cc flag path.  */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}
3540 
/* Add/subtract, with and without carry. */
TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

/* Tagged add/subtract (cc-only forms). */
TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

/* Logical operations; OR is handled separately below. */
TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

/* Multiplies and division. */
TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3567 
/* OR; specializes the mov alias to avoid emitting a real or.  */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov constant (or mov %g0,%g0). */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov register. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3584 
/*
 * UDIV: 64-bit dividend (%y : rs1-low-32) divided by a 32-bit
 * unsigned divisor.  On overflow the result saturates to UINT32_MAX
 * (the architectural overflow result for the non-cc form).
 * Division by zero raises TT_DIV_ZERO; for a register divisor this
 * is checked at runtime via a delayed-exception branch.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* Immediate zero (or %g0 divisor) traps at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        /* The simm13 is treated as an unsigned 32-bit divisor. */
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        /* Runtime divide-by-zero check. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend = %y in the high half, rs1 in the low half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate on overflow. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3637 
/*
 * UDIVX: full-width unsigned divide (sparc64).  Division by zero
 * raises TT_DIV_ZERO, checked at runtime for a register divisor.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* Immediate zero (or %g0 divisor) traps at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime divide-by-zero check. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3675 
/*
 * SDIVX: full-width signed divide (sparc64).  Besides the zero
 * divisor trap, INT64_MIN / -1 must be special-cased: the result is
 * INT64_MIN (negation wraps), but the host div instruction would
 * trap on some hosts (e.g. x86), so the divisor is patched to 1.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /* Immediate zero (or %g0 divisor) traps at translation time. */
    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, including the INT64_MIN wrap case. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime divide-by-zero check. */
        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3732 
3733 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3734                      int width, bool cc, bool left)
3735 {
3736     TCGv dst, s1, s2, lo1, lo2;
3737     uint64_t amask, tabl, tabr;
3738     int shift, imask, omask;
3739 
3740     dst = gen_dest_gpr(dc, a->rd);
3741     s1 = gen_load_gpr(dc, a->rs1);
3742     s2 = gen_load_gpr(dc, a->rs2);
3743 
3744     if (cc) {
3745         gen_op_subcc(cpu_cc_N, s1, s2);
3746     }
3747 
3748     /*
3749      * Theory of operation: there are two tables, left and right (not to
3750      * be confused with the left and right versions of the opcode).  These
3751      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3752      * these tables are loaded into two constants, TABL and TABR below.
3753      * The operation index = (input & imask) << shift calculates the index
3754      * into the constant, while val = (table >> index) & omask calculates
3755      * the value we're looking for.
3756      */
3757     switch (width) {
3758     case 8:
3759         imask = 0x7;
3760         shift = 3;
3761         omask = 0xff;
3762         if (left) {
3763             tabl = 0x80c0e0f0f8fcfeffULL;
3764             tabr = 0xff7f3f1f0f070301ULL;
3765         } else {
3766             tabl = 0x0103070f1f3f7fffULL;
3767             tabr = 0xfffefcf8f0e0c080ULL;
3768         }
3769         break;
3770     case 16:
3771         imask = 0x6;
3772         shift = 1;
3773         omask = 0xf;
3774         if (left) {
3775             tabl = 0x8cef;
3776             tabr = 0xf731;
3777         } else {
3778             tabl = 0x137f;
3779             tabr = 0xfec8;
3780         }
3781         break;
3782     case 32:
3783         imask = 0x4;
3784         shift = 0;
3785         omask = 0x3;
3786         if (left) {
3787             tabl = (2 << 2) | 3;
3788             tabr = (3 << 2) | 1;
3789         } else {
3790             tabl = (1 << 2) | 3;
3791             tabr = (3 << 2) | 2;
3792         }
3793         break;
3794     default:
3795         abort();
3796     }
3797 
3798     lo1 = tcg_temp_new();
3799     lo2 = tcg_temp_new();
3800     tcg_gen_andi_tl(lo1, s1, imask);
3801     tcg_gen_andi_tl(lo2, s2, imask);
3802     tcg_gen_shli_tl(lo1, lo1, shift);
3803     tcg_gen_shli_tl(lo2, lo2, shift);
3804 
3805     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3806     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3807     tcg_gen_andi_tl(lo1, lo1, omask);
3808     tcg_gen_andi_tl(lo2, lo2, omask);
3809 
3810     amask = address_mask_i(dc, -8);
3811     tcg_gen_andi_tl(s1, s1, amask);
3812     tcg_gen_andi_tl(s2, s2, amask);
3813 
3814     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3815     tcg_gen_and_tl(lo2, lo2, lo1);
3816     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3817 
3818     gen_store_gpr(dc, a->rd, dst);
3819     return advance_pc(dc);
3820 }
3821 
/* VIS1 edge insns set cc; the VIS2 "N" variants do not. */
TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3835 
/* Generic rd = func(rs1, rs2) translation for three-register insns.  */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3851 
/* ALIGNADDR: dst = (s1 + s2) & ~7; low 3 bits of the sum are saved
   in GSR.align for use by FALIGNDATA.  */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/* ALIGNADDRL: as above, but GSR.align receives the negated sum.  */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/* BMASK: dst = s1 + s2; the low 32 bits of the sum also go into the
   upper half of GSR (the bmask field).  */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3893 
/*
 * Register-count shifts.  L selects shift-left, else U selects
 * logical (unsigned) vs arithmetic right.  a->x is the 64-bit "X"
 * form; for the 32-bit forms the count is masked to 5 bits and the
 * input/result are extended appropriately.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Mask the shift count to the operand width. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result: discard any bits shifted above bit 31. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* 32-bit logical right: zero-extend the input first. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* 32-bit arithmetic right: sign-extend the input first. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3933 
/*
 * Immediate-count shifts.  Same L/U selection as do_shift_r.  On a
 * 64-bit cpu the 32-bit forms are implemented with deposit/extract,
 * which perform the shift and the 32-bit extension in one op.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift: the plain shift op suffices. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit shift on a 64-bit cpu: shift + extend fused. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
3970 
3971 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
3972 {
3973     /* For simplicity, we under-decoded the rs2 form. */
3974     if (!imm && rs2_or_imm & ~0x1f) {
3975         return NULL;
3976     }
3977     if (imm || rs2_or_imm == 0) {
3978         return tcg_constant_tl(rs2_or_imm);
3979     } else {
3980         return cpu_regs[rs2_or_imm];
3981     }
3982 }
3983 
/*
 * Conditional move into %rd: rd = (cmp holds) ? src2 : rd.
 * The old value of rd is pre-loaded so the false path preserves it.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
3993 
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    /* NULL means reserved bits were set in the rs2 form. */
    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4005 
/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    /* NULL means reserved bits were set in the rs2 form. */
    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4017 
/* MOVR: conditional move on the contents of register %rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    /* NULL means reserved bits were set in the rs2 form. */
    if (src2 == NULL) {
        return false;
    }
    /* gen_compare_reg rejects the reserved rcond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4031 
/*
 * Common helper for JMPL/RETT/RETURN/SAVE/RESTORE: compute
 * rs1 + (imm or rs2) and pass the sum to @func for the
 * instruction-specific part.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4056 
/*
 * JMPL: jump to @src and link the current pc into %rd.
 * The new npc is only known at runtime, so end with a lookup exit.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4077 
/*
 * RETT (sparc32 only): return from trap to @src.
 * Privileged; the helper performs the PSR state changes.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4095 
/*
 * RETURN (sparc64 only): restore the register window,
 * then jump to @src.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    /* The alignment check is emitted before the window restore. */
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4110 
/*
 * SAVE: advance to a new register window, then write the sum
 * (computed in the old window by do_add_special) into %rd.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4119 
/*
 * RESTORE: go back to the previous register window, then write the
 * sum (computed in the old window by do_add_special) into %rd.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4128 
/*
 * DONE/RETRY (sparc64 only): return from a trap handler via the
 * helpers, which restore pc/npc from the trap state; hence both
 * become dynamic here.  Privileged.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers may touch timers/interrupt state. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4147 
4148 /*
4149  * Major opcode 11 -- load and store instructions
4150  */
4151 
/*
 * Compute the effective address rs1 + (imm or rs2) for a load/store,
 * truncating to 32 bits when the address mask is in effect (AM_CHECK).
 * Returns NULL when the under-decoded rs2 field has reserved bits set.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Reuse tmp if the addition above already allocated one. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4180 
/* Integer load into %rd via the (possibly implicit) ASI in @a. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4204 
/* Integer store of %rd via the (possibly implicit) ASI in @a. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4224 
/* LDD: load doubleword into the even/odd register pair rd/rd+1. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must be even for the register pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4241 
/* STD: store doubleword from the even/odd register pair rd/rd+1. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must be even for the register pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4258 
/* LDSTUB: atomically load a byte into %rd and store all-ones. */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4275 
/* SWAP: atomically exchange %rd with the 32-bit word at the address. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* %rd is both source of the store and destination of the load. */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4293 
/*
 * CASA/CASXA: compare %rs2_or_imm (register form only) with the
 * memory word at [%rs1]; if equal, swap in %rd.  The old memory
 * value is always written back to %rd.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    /* The address is %rs1 alone; there is no displacement. */
    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4315 
/* Floating-point load of size @sz (MO_32/MO_64/MO_128) into %frd. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad accesses also require float128 support. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4343 
/* Floating-point store of size @sz (MO_32/MO_64/MO_128) from %frd. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad accesses also require float128 support. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4370 
/*
 * STDFQ (sparc32, privileged): the FP deferred-trap queue is not
 * implemented, so raise an fp exception with a sequence error.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4385 
/*
 * LDFSR/LDXFSR: load the FSR from memory.  @new_mask selects the bits
 * taken from memory, @old_mask the bits preserved from the old FSR;
 * the helper then acts on the merged value (e.g. rounding mode).
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4407 
/* STFSR/STXFSR: store the FSR to memory (32 or 64 bits). */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4423 
4424 static bool do_fc(DisasContext *dc, int rd, bool c)
4425 {
4426     uint64_t mask;
4427 
4428     if (gen_trap_ifnofpu(dc)) {
4429         return true;
4430     }
4431 
4432     if (rd & 1) {
4433         mask = MAKE_64BIT_MASK(0, 32);
4434     } else {
4435         mask = MAKE_64BIT_MASK(32, 32);
4436     }
4437     if (c) {
4438         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4439     } else {
4440         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4441     }
4442     gen_update_fprs_dirty(dc, rd);
4443     return advance_pc(dc);
4444 }
4445 
4446 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4447 TRANS(FONEs, VIS1, do_fc, a->rd, 1)
4448 
/*
 * FZEROd/FONEd (VIS 1): fill the double-precision register %frd
 * with the constant @c (0 or -1, i.e. all-zero or all-one bits).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4462 
/* Single-precision unary op, F -> F, with no env/exception handling. */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /* In-place: the loaded temporary doubles as the destination. */
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4483 
/* Unary op narrowing D -> F, with no env/exception handling. */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4503 
/* Single-precision unary op, F -> F, with IEEE exception checking. */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4524 
/* Unary op narrowing D -> F, with IEEE exception checking. */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4547 
/* Double-precision unary op, D -> D, with no env/exception handling. */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4569 
/* Double-precision unary op, D -> D, with IEEE exception checking. */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4591 
/* Unary op widening F -> D, with IEEE exception checking. */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4614 
/*
 * FMOVq (sparc64 only): copy a quad register as two i64 moves of
 * the underlying cpu_fpr pair.
 */
static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
{
    int rd, rs;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    rd = QFPREG(a->rd);
    rs = QFPREG(a->rs);
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}
4637 
/*
 * Quad-precision unary op via the QT0/QT1 staging slots,
 * with no IEEE exception checking.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
4658 
/*
 * Quad-precision unary op via the QT0/QT1 staging slots,
 * with IEEE exception checking.
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4679 
/* Unary op narrowing Q (via QT1) -> F, with IEEE exception checking. */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4703 
/* Unary op narrowing Q (via QT1) -> D, with IEEE exception checking. */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4727 
/*
 * Unary op widening F -> Q (result in QT0).  Note: unlike the
 * narrowing cases, no check_ieee_exceptions call is emitted here.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4750 
/*
 * Unary op widening D -> Q (result in QT0).  As with do_env_qf,
 * no check_ieee_exceptions call is emitted here.
 */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4773 
/* Single-precision binary op, F x F -> F, no env/exception handling. */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /* src1 doubles as the destination temporary. */
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4802 
/* Single-precision binary op, F x F -> F, with IEEE exception checking. */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4825 
/* Double-precision binary op, D x D -> D, no env/exception handling. */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4869 
/* VIS compare: D x D -> integer mask written to the GPR %rd. */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4897 
/* Double-precision binary op, D x D -> D, with IEEE exception checking. */
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4921 
/*
 * FsMULd: multiply two singles to a double.  Gated on the
 * CPU_FEATURE_FSMULD feature bit; otherwise unimplemented-FPop.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
4943 
/*
 * Double-precision ternary op: the old value of %rd is a third
 * source (e.g. PDIST accumulates into rd).
 */
static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst  = gen_dest_fpr_D(dc, a->rd);
    src0 = gen_load_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src0, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
4963 
/*
 * Quad-precision binary op via the QT0/QT1 staging slots
 * (rs1 in QT0, rs2 in QT1, result in QT0), with IEEE checking.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
4988 
/*
 * FdMULq: multiply two double-precision sources into a quad-precision
 * destination.  The result is delivered through the env QT0 slot.
 */
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    /* Helper leaves the quad result in QT0. */
    gen_helper_fdmulq(tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}
5009 
/*
 * FMOVR: conditionally move an FP register based on comparing integer
 * register rs1 against zero (register condition, v9 only).
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    /* Reject reserved rcond encodings before emitting anything. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    /* func performs the width-specific conditional register move. */
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5033 
/*
 * FMOVcc: conditionally move an FP register based on the integer
 * condition codes (%icc/%xcc selected by a->cc, v9 only).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    /* Build the integer-cc comparison, then do the conditional move. */
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5055 
/*
 * FMOVfcc: conditionally move an FP register based on the floating-point
 * condition codes (%fccN selected by a->cc, v9 only).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    /* Build the fcc comparison, then do the conditional move. */
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5077 
5078 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5079 {
5080     TCGv_i32 src1, src2;
5081 
5082     if (avail_32(dc) && a->cc != 0) {
5083         return false;
5084     }
5085     if (gen_trap_ifnofpu(dc)) {
5086         return true;
5087     }
5088 
5089     gen_op_clear_ieee_excp_and_FTT();
5090     src1 = gen_load_fpr_F(dc, a->rs1);
5091     src2 = gen_load_fpr_F(dc, a->rs2);
5092     if (e) {
5093         gen_op_fcmpes(a->cc, src1, src2);
5094     } else {
5095         gen_op_fcmps(a->cc, src1, src2);
5096     }
5097     return advance_pc(dc);
5098 }
5099 
/* Quiet and signalling single-precision compares. */
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5102 
5103 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5104 {
5105     TCGv_i64 src1, src2;
5106 
5107     if (avail_32(dc) && a->cc != 0) {
5108         return false;
5109     }
5110     if (gen_trap_ifnofpu(dc)) {
5111         return true;
5112     }
5113 
5114     gen_op_clear_ieee_excp_and_FTT();
5115     src1 = gen_load_fpr_D(dc, a->rs1);
5116     src2 = gen_load_fpr_D(dc, a->rs2);
5117     if (e) {
5118         gen_op_fcmped(a->cc, src1, src2);
5119     } else {
5120         gen_op_fcmpd(a->cc, src1, src2);
5121     }
5122     return advance_pc(dc);
5123 }
5124 
/* Quiet and signalling double-precision compares. */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5127 
/*
 * Emit a quad-precision FP compare into %fccN.  Operands are staged
 * through the env QT0/QT1 slots, as for the quad arithmetic ops.
 * With e set, generate the signalling variant (FCMPEq).
 */
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    /* Pre-v9 CPUs only implement fcc0. */
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    if (e) {
        gen_op_fcmpeq(a->cc);
    } else {
        gen_op_fcmpq(a->cc);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5153 
/*
 * TranslatorOps init hook: populate the per-TB DisasContext from the
 * CPU definition and the flags/cs_base baked into the TB.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* cs_base carries NPC, so delay-slot state survives TB lookup. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5183 
/* TranslatorOps tb_start hook: nothing to emit at the head of a TB. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5187 
/*
 * TranslatorOps insn_start hook: record (pc, npc) for this insn so
 * restore_state_to_opc can rebuild them after an exception.  Symbolic
 * NPC values (low bits set) are canonicalized for the opc stream.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Encode the taken target; the fallthrough is pc + 4. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic forms restore the same way. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5209 
/*
 * TranslatorOps translate_insn hook: fetch one 32-bit insn, decode and
 * translate it, and decide whether the TB should continue.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* Decoder rejection means an illegal instruction trap. */
    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* A taken branch leaves pc != pc_next; end the TB here. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5230 
5231 static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
5232 {
5233     DisasContext *dc = container_of(dcbase, DisasContext, base);
5234     DisasDelayException *e, *e_next;
5235     bool may_lookup;
5236 
5237     finishing_insn(dc);
5238 
5239     switch (dc->base.is_jmp) {
5240     case DISAS_NEXT:
5241     case DISAS_TOO_MANY:
5242         if (((dc->pc | dc->npc) & 3) == 0) {
5243             /* static PC and NPC: we can use direct chaining */
5244             gen_goto_tb(dc, 0, dc->pc, dc->npc);
5245             break;
5246         }
5247 
5248         may_lookup = true;
5249         if (dc->pc & 3) {
5250             switch (dc->pc) {
5251             case DYNAMIC_PC_LOOKUP:
5252                 break;
5253             case DYNAMIC_PC:
5254                 may_lookup = false;
5255                 break;
5256             default:
5257                 g_assert_not_reached();
5258             }
5259         } else {
5260             tcg_gen_movi_tl(cpu_pc, dc->pc);
5261         }
5262 
5263         if (dc->npc & 3) {
5264             switch (dc->npc) {
5265             case JUMP_PC:
5266                 gen_generic_branch(dc);
5267                 break;
5268             case DYNAMIC_PC:
5269                 may_lookup = false;
5270                 break;
5271             case DYNAMIC_PC_LOOKUP:
5272                 break;
5273             default:
5274                 g_assert_not_reached();
5275             }
5276         } else {
5277             tcg_gen_movi_tl(cpu_npc, dc->npc);
5278         }
5279         if (may_lookup) {
5280             tcg_gen_lookup_and_goto_ptr();
5281         } else {
5282             tcg_gen_exit_tb(NULL, 0);
5283         }
5284         break;
5285 
5286     case DISAS_NORETURN:
5287        break;
5288 
5289     case DISAS_EXIT:
5290         /* Exit TB */
5291         save_state(dc);
5292         tcg_gen_exit_tb(NULL, 0);
5293         break;
5294 
5295     default:
5296         g_assert_not_reached();
5297     }
5298 
5299     for (e = dc->delay_excp_list; e ; e = e_next) {
5300         gen_set_label(e->lab);
5301 
5302         tcg_gen_movi_tl(cpu_pc, e->pc);
5303         if (e->npc % 4 == 0) {
5304             tcg_gen_movi_tl(cpu_npc, e->npc);
5305         }
5306         gen_helper_raise_exception(tcg_env, e->excp);
5307 
5308         e_next = e->next;
5309         g_free(e);
5310     }
5311 }
5312 
/* TranslatorOps disas_log hook: dump the guest disassembly of this TB. */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
5319 
/* Hook table consumed by translator_loop() for the SPARC target. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5328 
/*
 * Target entry point for TB translation: run the generic translator
 * loop over a zero-initialized SPARC DisasContext.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5336 
/*
 * One-time TCG setup: register every CPUSPARCState field that the
 * translator accesses as a TCG global, so generated code can refer to
 * guest registers by name.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* One name per 64-bit FP register pair (fpr[] holds doubles). */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* Table of target_ulong-sized globals: TCGv slot, env offset, name. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hardwired to zero and handled specially by gen_load_gpr. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Window registers live behind regwptr, which tracks CWP. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }

#ifdef TARGET_SPARC64
    cpu_fprs = tcg_global_mem_new_i32(tcg_env,
                                      offsetof(CPUSPARCState, fprs), "fprs");
#endif
}
5404 
/*
 * Rebuild env->pc/npc from the (pc, npc) pair recorded at insn_start,
 * after an exception unwinds mid-TB.  Symbolic npc encodings (see
 * sparc_tr_insn_start) are resolved here.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;
        } else {
            env->npc = pc + 4;
        }
    } else {
        env->npc = npc;
    }
}
5428