xref: /openbmc/qemu/target/sparc/translate.c (revision 2a45b736)
1 /*
2    SPARC translation
3 
4    Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5    Copyright (C) 2003-2005 Fabrice Bellard
6 
7    This library is free software; you can redistribute it and/or
8    modify it under the terms of the GNU Lesser General Public
9    License as published by the Free Software Foundation; either
10    version 2.1 of the License, or (at your option) any later version.
11 
12    This library is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15    Lesser General Public License for more details.
16 
17    You should have received a copy of the GNU Lesser General Public
18    License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 #include "asi.h"
33 
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef  HELPER_H
37 
/*
 * Helpers that exist for only one of the 32-bit or 64-bit configurations
 * are stubbed out for the other, so that shared translator code can
 * reference the names unconditionally.  qemu_build_not_reached() turns
 * any reachable use in the wrong configuration into a build-time error.
 */
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_fabsd(D, S)                 qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fnegd(D, S)                 qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
/*
 * These stubs expand to a statement expression so they can appear where
 * a function pointer / value is expected (e.g. in dispatch tables).
 */
# define gen_helper_fabsq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16al           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16au           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8sux16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmuld8ulx16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fnegq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos                ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist                ({ qemu_build_not_reached(); NULL; })
# define FSR_LDXFSR_MASK                        0
# define FSR_LDXFSR_OLDMASK                     0
# define MAXTL_MASK                             0
#endif
96 
/*
 * Sentinel values stored in dc->pc / dc->npc when the address is not a
 * compile-time constant.  Real instruction addresses are 4-aligned, so
 * the low two bits distinguish these from static PCs.
 */
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
/* Per-flag condition-code state; Z and C are kept per icc/xcc. */
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
/* Build-time traps for sparc64-only globals referenced from common code. */
# define cpu_fprs               ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr                ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

/* cpu_cc_Z / cpu_cc_C name the full-width flags for this configuration. */
#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];

/* offsetof wrappers that fail at build time for the wrong configuration. */
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

/* An exception to be raised from a delay slot; see delay_exceptionv(). */
typedef struct DisasDelayException {
    struct DisasDelayException *next;   /* singly-linked per-TB list */
    TCGLabel *lab;                      /* branch target raising the excp */
    TCGv_i32 excp;                      /* exception number to raise */
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;
159 
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    int mem_idx;             /* mmu index for memory accesses */
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    uint32_t cc_op;  /* current CC operation */
    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;  /* FPRS dirty bits already set within this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list; /* delay-slot exceptions to emit */
} DisasContext;

/* A condition decomposed into a TCG comparison of c1 against c2. */
typedef struct {
    TCGCond cond;
    bool is_bool;   /* c1 is already a 0/1 value */
    TCGv c1, c2;
} DisasCompare;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

/* Map double/quad FP register numbers onto the flat register file. */
#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#else
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)
#endif

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

/* Bit 13 of an insn selects the immediate operand form. */
#define IS_IMM (insn & (1<<13))
213 
214 static void gen_update_fprs_dirty(DisasContext *dc, int rd)
215 {
216 #if defined(TARGET_SPARC64)
217     int bit = (rd < 32) ? 1 : 2;
218     /* If we know we've already set this bit within the TB,
219        we can avoid setting it again.  */
220     if (!(dc->fprs_dirty & bit)) {
221         dc->fprs_dirty |= bit;
222         tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
223     }
224 #endif
225 }
226 
227 /* floating point registers moves */
228 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
229 {
230     TCGv_i32 ret = tcg_temp_new_i32();
231     if (src & 1) {
232         tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
233     } else {
234         tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
235     }
236     return ret;
237 }
238 
239 static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
240 {
241     TCGv_i64 t = tcg_temp_new_i64();
242 
243     tcg_gen_extu_i32_i64(t, v);
244     tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
245                         (dst & 1 ? 0 : 32), 32);
246     gen_update_fprs_dirty(dc, dst);
247 }
248 
/* Allocate a fresh temp to receive a single-precision result. */
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
{
    return tcg_temp_new_i32();
}
253 
254 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
255 {
256     src = DFPREG(src);
257     return cpu_fpr[src / 2];
258 }
259 
260 static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
261 {
262     dst = DFPREG(dst);
263     tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
264     gen_update_fprs_dirty(dc, dst);
265 }
266 
/* Destination slot for a double-precision result (written in place). */
static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
{
    return cpu_fpr[DFPREG(dst) / 2];
}
271 
/*
 * Quad-precision values are staged through the env qt0/qt1 scratch
 * slots for the out-of-line helpers; these move a 64-bit register
 * pair to or from those slots.
 */
static void gen_op_load_fpr_QT0(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_load_fpr_QT1(unsigned int src)
{
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));
}

static void gen_op_store_QT0_fpr(unsigned int dst)
{
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
}
295 
/* moves */
/* Current privilege level; constants fold the checks away where possible. */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/* Whether addresses must be truncated to 32 bits for this context. */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
319 
320 static void gen_address_mask(DisasContext *dc, TCGv addr)
321 {
322     if (AM_CHECK(dc)) {
323         tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
324     }
325 }
326 
327 static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
328 {
329     return AM_CHECK(dc) ? (uint32_t)addr : addr;
330 }
331 
332 static TCGv gen_load_gpr(DisasContext *dc, int reg)
333 {
334     if (reg > 0) {
335         assert(reg < 32);
336         return cpu_regs[reg];
337     } else {
338         TCGv t = tcg_temp_new();
339         tcg_gen_movi_tl(t, 0);
340         return t;
341     }
342 }
343 
344 static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
345 {
346     if (reg > 0) {
347         assert(reg < 32);
348         tcg_gen_mov_tl(cpu_regs[reg], v);
349     }
350 }
351 
352 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
353 {
354     if (reg > 0) {
355         assert(reg < 32);
356         return cpu_regs[reg];
357     } else {
358         return tcg_temp_new();
359     }
360 }
361 
362 static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
363 {
364     return translator_use_goto_tb(&s->base, pc) &&
365            translator_use_goto_tb(&s->base, npc);
366 }
367 
368 static void gen_goto_tb(DisasContext *s, int tb_num,
369                         target_ulong pc, target_ulong npc)
370 {
371     if (use_goto_tb(s, pc, npc))  {
372         /* jump to same page: we can use a direct jump */
373         tcg_gen_goto_tb(tb_num);
374         tcg_gen_movi_tl(cpu_pc, pc);
375         tcg_gen_movi_tl(cpu_npc, npc);
376         tcg_gen_exit_tb(s->base.tb, tb_num);
377     } else {
378         /* jump to another page: we can use an indirect jump */
379         tcg_gen_movi_tl(cpu_pc, pc);
380         tcg_gen_movi_tl(cpu_npc, npc);
381         tcg_gen_lookup_and_goto_ptr();
382     }
383 }
384 
/*
 * ADDcc: dst = src1 + src2, recording operands and result in the
 * cc_* globals for later lazy flag computation.
 */
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
392 
/*
 * Reconstruct the 32-bit carry-out of the most recent add from the
 * recorded cc state, as a 0/1 value in a new i32 temp.
 */
static TCGv_i32 gen_add32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    /* Compare only the low 32 bits of the recorded values. */
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
#else
    /* Already 32-bit: use the globals directly. */
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}

/*
 * As above, but reconstruct the borrow-out of the most recent subtract.
 */
static TCGv_i32 gen_sub32_carry32(void)
{
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
#else
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;
#endif

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);

    return carry_32;
}
434 
/*
 * dst = src1 + src2 + carry_32, optionally recording operands for
 * the condition codes (in which case dst must be cpu_cc_dst).
 */
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    tcg_gen_add_tl(dst, src1, src2);

#ifdef TARGET_SPARC64
    /* TCGv is 64-bit here: widen the 32-bit carry before adding. */
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
#else
    /* TCGv and TCGv_i32 coincide on a 32-bit target. */
    tcg_gen_add_i32(dst, dst, carry_32);
#endif

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
454 
/*
 * dst = src1 + src2 + C, where C is the carry from a preceding add
 * recorded in the cc globals.
 */
static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
479 
/*
 * ADDC/ADDCcc entry points, specialized on how the incoming carry was
 * produced (previous add, previous subtract, or generic icc helper).
 */
static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, false);
}

/* As above, also recording operands for the condition codes. */
static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_add(dst, src1, src2, true);
}

/* Carry sourced from a preceding subtract. */
static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);
}

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);
}

/* Generic fallback: ask the icc helper for the current carry. */
static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, false);
}

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addc_int_generic(dst, src1, src2, true);
}
517 
/*
 * SUBcc: dst = src1 - src2, recording operands and result in the
 * cc_* globals for later lazy flag computation.
 */
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
525 
/*
 * dst = src1 - src2 - carry_32, optionally recording operands for
 * the condition codes (in which case dst must be cpu_cc_dst).
 */
static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
{
    TCGv carry;

#if TARGET_LONG_BITS == 64
    /* Widen the 32-bit carry to the 64-bit target width. */
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);
#else
    carry = carry_32;
#endif

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}
547 
/*
 * SUBC/SUBCcc entry points, specialized on how the incoming carry was
 * produced (previous add, previous subtract, or generic icc helper).
 */
static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);
}

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);
}

/* Carry sourced from a preceding subtract. */
static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
{
    TCGv discard;

    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
        return;
    }

    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    if (update_cc) {
        tcg_debug_assert(dst == cpu_cc_dst);
        tcg_gen_mov_tl(cpu_cc_src, src1);
        tcg_gen_mov_tl(cpu_cc_src2, src2);
    }
}

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, false);
}

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_sub(dst, src1, src2, true);
}

/* Generic fallback: ask the icc helper for the current carry. */
static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
                                    bool update_cc)
{
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);
}

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, false);
}

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subc_int_generic(dst, src1, src2, true);
}
609 
/*
 * MULScc: one step of the 32-bit multiply-step algorithm.  The inline
 * pseudo-code comments describe the original interpreter semantics
 * each group of TCG ops reproduces.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();
    t0 = tcg_temp_new();

    /* old op:
    if (!(env->y & 1))
        T1 = 0;
    */
    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    /* Zero the multiplicand copy when the low bit of Y is clear. */
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
                       zero, cpu_cc_src2);

    // b2 = T0 & 1;
    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    // T0 = (b1 << 31) | (T0 >> 1);
    // src1 = T0;
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    /* Partial-product accumulate, leaving cc state for the flags. */
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
}
646 
/*
 * 32x32 -> 64 multiply: dst receives the product (the full 64-bit
 * value on a 64-bit target) and cpu_y the high 32 bits.  sign_ext
 * selects signed vs unsigned extension of the truncated operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    /* The double-width ops give us the high half directly. */
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    /* TCGv is 64-bit here, so TCGv and TCGv_i64 interchange. */
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
671 
/* UMUL: unsigned 32x32 multiply. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32 multiply. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
683 
/*
 * Division and tagged arithmetic go through out-of-line helpers
 * (they need access to env state; see the helper definitions).
 */
static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udivx(dst, tcg_env, src1, src2);
}

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdivx(dst, tcg_env, src1, src2);
}

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv(dst, tcg_env, src1, src2);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv(dst, tcg_env, src1, src2);
}

/* _cc helper variants; presumably also update the flags — see helpers. */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC counts the set bits of src2 only; src1 is unused by the insn. */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}
728 
#ifndef TARGET_SPARC64
/* sparc32 stub so common code links; must never be reached there. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif
735 
/* ARRAY16: the ARRAY8 result scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the ARRAY8 result scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
747 
/* GSR-dependent pack operations; these exist only on sparc64. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
774 
/*
 * FALIGNDATA: concatenate s1:s2 and extract 8 bytes starting at the
 * byte offset held in the low 3 bits of GSR.
 */
static void gen_op_faligndata(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Convert the byte offset in GSR to a bit count. */
    tcg_gen_andi_tl(shift, cpu_gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}
801 
/* BSHUFFLE: GSR-controlled byte shuffle; sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
810 
/* Branch-always and branch-never condition values. */
// 1
static void gen_op_eval_ba(TCGv dst)
{
    tcg_gen_movi_tl(dst, 1);
}

// 0
static void gen_op_eval_bn(TCGv dst)
{
    tcg_gen_movi_tl(dst, 0);
}
822 
/*
  FPSR bit field FCC1 | FCC0:
   0 =
   1 <
   2 >
   3 unordered

  fcc_offset selects which fcc field of the FSR value is examined;
  each evaluator below computes the truth table named in its comment
  from the two FCC bits, leaving 0/1 in dst.
*/
static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                                    unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
{
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
}

// !0: FCC0 | FCC1
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
}

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
}

// 1 or 3: FCC0
static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
}

// 1: FCC0 & !FCC1
static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
}

// 2 or 3: FCC1
static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
}

// 2: !FCC0 & FCC1
static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
}

// 3: FCC0 & FCC1
static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
}

// 0: !(FCC0 | FCC1)
static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 2: !FCC0
static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// 0 or 1: !FCC1
static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);
}

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
{
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
}
962 }
963 
/*
 * End the TB with a two-way static branch: go to pc1 when r_cond is
 * non-zero, otherwise to pc2.
 */
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
{
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    /* Condition true: taken path. */
    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_set_label(l1);
    gen_goto_tb(dc, 1, pc2, pc2 + 4);
}
976 
977 static void gen_generic_branch(DisasContext *dc)
978 {
979     TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
980     TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
981     TCGv zero = tcg_constant_tl(0);
982 
983     tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
984 }
985 
986 /* call this function before using the condition register as it may
987    have been set for a jump */
988 static void flush_cond(DisasContext *dc)
989 {
990     if (dc->npc == JUMP_PC) {
991         gen_generic_branch(dc);
992         dc->npc = DYNAMIC_PC_LOOKUP;
993     }
994 }
995 
996 static void save_npc(DisasContext *dc)
997 {
998     if (dc->npc & 3) {
999         switch (dc->npc) {
1000         case JUMP_PC:
1001             gen_generic_branch(dc);
1002             dc->npc = DYNAMIC_PC_LOOKUP;
1003             break;
1004         case DYNAMIC_PC:
1005         case DYNAMIC_PC_LOOKUP:
1006             break;
1007         default:
1008             g_assert_not_reached();
1009         }
1010     } else {
1011         tcg_gen_movi_tl(cpu_npc, dc->npc);
1012     }
1013 }
1014 
1015 static void update_psr(DisasContext *dc)
1016 {
1017     if (dc->cc_op != CC_OP_FLAGS) {
1018         dc->cc_op = CC_OP_FLAGS;
1019         gen_helper_compute_psr(tcg_env);
1020     }
1021 }
1022 
/* Flush the translation-time pc and npc to the CPU state, e.g. before
   calling a helper that may raise an exception. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1028 
/* Raise exception WHICH at the current insn; ends the TB. */
static void gen_exception(DisasContext *dc, int which)
{
    /* pc/npc must be up to date so the exception unwinds correctly. */
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1035 
1036 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1037 {
1038     DisasDelayException *e = g_new0(DisasDelayException, 1);
1039 
1040     e->next = dc->delay_excp_list;
1041     dc->delay_excp_list = e;
1042 
1043     e->lab = gen_new_label();
1044     e->excp = excp;
1045     e->pc = dc->pc;
1046     /* Caller must have used flush_cond before branch. */
1047     assert(e->npc != JUMP_PC);
1048     e->npc = dc->npc;
1049 
1050     return e->lab;
1051 }
1052 
/* Convenience wrapper: delayed exception with a constant trap number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1057 
1058 static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
1059 {
1060     TCGv t = tcg_temp_new();
1061     TCGLabel *lab;
1062 
1063     tcg_gen_andi_tl(t, addr, mask);
1064 
1065     flush_cond(dc);
1066     lab = delay_exception(dc, TT_UNALIGNED);
1067     tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
1068 }
1069 
1070 static void gen_mov_pc_npc(DisasContext *dc)
1071 {
1072     if (dc->npc & 3) {
1073         switch (dc->npc) {
1074         case JUMP_PC:
1075             gen_generic_branch(dc);
1076             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1077             dc->pc = DYNAMIC_PC_LOOKUP;
1078             break;
1079         case DYNAMIC_PC:
1080         case DYNAMIC_PC_LOOKUP:
1081             tcg_gen_mov_tl(cpu_pc, cpu_npc);
1082             dc->pc = dc->npc;
1083             break;
1084         default:
1085             g_assert_not_reached();
1086         }
1087     } else {
1088         dc->pc = dc->npc;
1089     }
1090 }
1091 
/* Runtime sequential advance: pc = npc, npc += 4. */
static void gen_op_next_insn(void)
{
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
}
1097 
/*
 * Fill *CMP with a comparison equivalent to integer condition COND
 * evaluated against %icc (xcc == false) or %xcc (xcc == true).
 * When the flags were produced by a subtract (CC_OP_SUB), compare the
 * original operands directly; otherwise materialize the flags and
 * derive the condition from the N/V/Z/C fields.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    /* TCG condition for each SPARC cond code when cc came from subcc.
       -1 marks entries handled specially (sign/overflow tests). */
    static int subcc_cond[16] = {
        TCG_COND_NEVER,
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
        TCG_COND_LEU,
        TCG_COND_LTU,
        -1, /* neg */
        -1, /* overflow */
        TCG_COND_ALWAYS,
        TCG_COND_NE,
        TCG_COND_GT,
        TCG_COND_GE,
        TCG_COND_GTU,
        TCG_COND_GEU,
        -1, /* pos */
        -1, /* no overflow */
    };

    TCGv t1, t2;

    cmp->is_bool = false;

    switch (dc->cc_op) {
    case CC_OP_SUB:
        switch (cond) {
        case 6:  /* neg */
        case 14: /* pos */
            /* Sign of the subtract result decides neg/pos. */
            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            cmp->c2 = tcg_constant_tl(0);
            if (TARGET_LONG_BITS == 32 || xcc) {
                cmp->c1 = cpu_cc_dst;
            } else {
                cmp->c1 = t1 = tcg_temp_new();
                tcg_gen_ext32s_tl(t1, cpu_cc_dst);
            }
            return;

        case 7: /* overflow */
        case 15: /* !overflow */
            /* No direct operand comparison; fall back to flags below. */
            break;

        default:
            cmp->cond = subcc_cond[cond];
            if (TARGET_LONG_BITS == 32 || xcc) {
                cmp->c1 = cpu_cc_src;
                cmp->c2 = cpu_cc_src2;
            } else {
                /* Note that sign-extension works for unsigned compares as
                   long as both operands are sign-extended.  */
                cmp->c1 = t1 = tcg_temp_new();
                tcg_gen_ext32s_tl(t1, cpu_cc_src);
                cmp->c2 = t2 = tcg_temp_new();
                tcg_gen_ext32s_tl(t2, cpu_cc_src2);
            }
            return;
        }
        break;

    default:
        /* Flags are lazy; compute them into the N/V/Z/C fields. */
        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;
        break;

    case CC_OP_FLAGS:
        break;
    }

    /* Derive the condition from the materialized flag fields;
       conditions 8..15 are the inverses of 0..7 (see tail). */
    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = cmp->c2;
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0             NE
         *   cc_Z && !((N ^ V) < 0)          EQ
         *   cc_Z & ~((N ^ V) >> sign-bit)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        /* Conditions 8..15 are the inverse of conditions 0..7. */
        cmp->cond = tcg_invert_cond(cmp->cond);
        cmp->is_bool = false;
    }
}
1264 
/*
 * Fill *CMP with a comparison equivalent to floating-point condition
 * COND evaluated against fcc field CC (0..3) of the FSR.  The result
 * is always a boolean in a temporary, compared != 0.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    unsigned int offset;
    TCGv r_dst;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

    /* Bit offset of the selected fcc field within the FSR value. */
    switch (cc) {
    default:
    case 0x0:
        offset = 0;
        break;
    case 0x1:
        offset = 32 - 10;
        break;
    case 0x2:
        offset = 34 - 10;
        break;
    case 0x3:
        offset = 36 - 10;
        break;
    }

    /* Dispatch on the 4-bit fbcc condition encoding. */
    switch (cond) {
    case 0x0:
        gen_op_eval_bn(r_dst);
        break;
    case 0x1:
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        break;
    case 0x2:
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        break;
    case 0x3:
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        break;
    case 0x4:
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        break;
    case 0x5:
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        break;
    case 0x6:
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        break;
    case 0x7:
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        break;
    case 0x8:
        gen_op_eval_ba(r_dst);
        break;
    case 0x9:
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        break;
    case 0xa:
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        break;
    case 0xb:
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        break;
    case 0xc:
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        break;
    case 0xd:
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        break;
    case 0xe:
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        break;
    case 0xf:
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
        break;
    }
}
1343 
/* Register-condition (BPr/MOVr) encodings mapped to TCG conditions.
   Note the table holds the INVERSE condition; gen_compare_reg inverts
   it again (tcg_invert_cond) to obtain the actual test. */
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NE,
    TCG_COND_GT,
    TCG_COND_GE,
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_EQ,
    TCG_COND_LE,
    TCG_COND_LT,
};
1355 
/* Fill *CMP for a register-vs-zero condition (BPr/MOVr encodings). */
static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    /* Table stores inverted conditions; undo that here. */
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c1 = r_src;
    cmp->c2 = tcg_constant_tl(0);
}
1363 
/* Clear the FTT and current-exception (cexc) fields of the FSR. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
}
1368 
/* FMOVs: single-precision register move; clears FTT/cexc like all
   non-trapping FPops. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1374 
/* FNEGs: single-precision sign flip; clears FTT/cexc first. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegs(dst, src);
}
1380 
/* FABSs: single-precision absolute value; clears FTT/cexc first. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabss(dst, src);
}
1386 
/* FMOVd: double-precision register move; clears FTT/cexc first. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1392 
/* FNEGd: double-precision sign flip; clears FTT/cexc first. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fnegd(dst, src);
}
1398 
/* FABSd: double-precision absolute value; clears FTT/cexc first. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    gen_helper_fabsd(dst, src);
}
1404 
1405 #ifdef TARGET_SPARC64
/* FCMPs: compare singles into fcc field FCCNO (v9 has %fcc0..%fcc3). */
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1423 
/* FCMPd: compare doubles into fcc field FCCNO. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1441 
/* FCMPq: compare quads (operands taken from env by the helper)
   into fcc field FCCNO. */
static void gen_op_fcmpq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1459 
/* FCMPEs: compare singles, signaling on unordered, into fcc FCCNO. */
static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1477 
/* FCMPEd: compare doubles, signaling on unordered, into fcc FCCNO. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 1:
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 2:
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    case 3:
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);
        break;
    }
}
1495 
/* FCMPEq: compare quads, signaling on unordered, into fcc FCCNO. */
static void gen_op_fcmpeq(int fccno)
{
    switch (fccno) {
    case 0:
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        break;
    case 1:
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        break;
    case 2:
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        break;
    case 3:
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);
        break;
    }
}
1513 
1514 #else
1515 
/* Pre-v9 FCMPs: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1520 
/* Pre-v9 FCMPd: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1525 
/* Pre-v9 FCMPq: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmpq(int fccno)
{
    gen_helper_fcmpq(cpu_fsr, tcg_env);
}
1530 
/* Pre-v9 FCMPEs: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
{
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1535 
/* Pre-v9 FCMPEd: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
{
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
}
1540 
/* Pre-v9 FCMPEq: only %fcc0 exists, so FCCNO is ignored. */
static void gen_op_fcmpeq(int fccno)
{
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
}
1545 #endif
1546 
/* Set the FSR trap-type field to FSR_FLAGS and raise TT_FP_EXCP. */
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
{
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);
}
1553 
/* Raise TT_NFPU_INSN and return nonzero if the FPU is disabled.
   User-only always has the FPU enabled. */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1564 
/* asi moves */

/* Classification of an ASI access, deciding which code path handles it. */
typedef enum {
    GET_ASI_HELPER,     /* generic: go through the ld/st_asi helpers */
    GET_ASI_EXCP,       /* an exception has already been generated */
    GET_ASI_DIRECT,     /* plain qemu_ld/st with a chosen mmu index */
    GET_ASI_DTWINX,     /* 128-bit twin load/store */
    GET_ASI_BLOCK,      /* 64-byte block load/store */
    GET_ASI_SHORT,      /* 8/16-bit FP load/store */
    GET_ASI_BCOPY,      /* LEON/sun4m 32-byte block copy */
    GET_ASI_BFILL,      /* LEON/sun4m 32-byte block fill */
} ASIType;
1576 
/* Resolved ASI access description, produced by resolve_asi(). */
typedef struct {
    ASIType type;   /* handling strategy (see ASIType) */
    int asi;        /* raw ASI number */
    int mem_idx;    /* mmu index for direct accesses */
    MemOp memop;    /* size/sign/endianness of the access */
} DisasASI;
1583 
1584 /*
1585  * Build DisasASI.
1586  * For asi == -1, treat as non-asi.
1587  * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1588  */
1589 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1590 {
1591     ASIType type = GET_ASI_HELPER;
1592     int mem_idx = dc->mem_idx;
1593 
1594     if (asi == -1) {
1595         /* Artificial "non-asi" case. */
1596         type = GET_ASI_DIRECT;
1597         goto done;
1598     }
1599 
1600 #ifndef TARGET_SPARC64
1601     /* Before v9, all asis are immediate and privileged.  */
1602     if (asi < 0) {
1603         gen_exception(dc, TT_ILL_INSN);
1604         type = GET_ASI_EXCP;
1605     } else if (supervisor(dc)
1606                /* Note that LEON accepts ASI_USERDATA in user mode, for
1607                   use with CASA.  Also note that previous versions of
1608                   QEMU allowed (and old versions of gcc emitted) ASI_P
1609                   for LEON, which is incorrect.  */
1610                || (asi == ASI_USERDATA
1611                    && (dc->def->features & CPU_FEATURE_CASA))) {
1612         switch (asi) {
1613         case ASI_USERDATA:   /* User data access */
1614             mem_idx = MMU_USER_IDX;
1615             type = GET_ASI_DIRECT;
1616             break;
1617         case ASI_KERNELDATA: /* Supervisor data access */
1618             mem_idx = MMU_KERNEL_IDX;
1619             type = GET_ASI_DIRECT;
1620             break;
1621         case ASI_M_BYPASS:    /* MMU passthrough */
1622         case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1623             mem_idx = MMU_PHYS_IDX;
1624             type = GET_ASI_DIRECT;
1625             break;
1626         case ASI_M_BCOPY: /* Block copy, sta access */
1627             mem_idx = MMU_KERNEL_IDX;
1628             type = GET_ASI_BCOPY;
1629             break;
1630         case ASI_M_BFILL: /* Block fill, stda access */
1631             mem_idx = MMU_KERNEL_IDX;
1632             type = GET_ASI_BFILL;
1633             break;
1634         }
1635 
1636         /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1637          * permissions check in get_physical_address(..).
1638          */
1639         mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1640     } else {
1641         gen_exception(dc, TT_PRIV_INSN);
1642         type = GET_ASI_EXCP;
1643     }
1644 #else
1645     if (asi < 0) {
1646         asi = dc->asi;
1647     }
1648     /* With v9, all asis below 0x80 are privileged.  */
1649     /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1650        down that bit into DisasContext.  For the moment that's ok,
1651        since the direct implementations below doesn't have any ASIs
1652        in the restricted [0x30, 0x7f] range, and the check will be
1653        done properly in the helper.  */
1654     if (!supervisor(dc) && asi < 0x80) {
1655         gen_exception(dc, TT_PRIV_ACT);
1656         type = GET_ASI_EXCP;
1657     } else {
1658         switch (asi) {
1659         case ASI_REAL:      /* Bypass */
1660         case ASI_REAL_IO:   /* Bypass, non-cacheable */
1661         case ASI_REAL_L:    /* Bypass LE */
1662         case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1663         case ASI_TWINX_REAL:   /* Real address, twinx */
1664         case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1665         case ASI_QUAD_LDD_PHYS:
1666         case ASI_QUAD_LDD_PHYS_L:
1667             mem_idx = MMU_PHYS_IDX;
1668             break;
1669         case ASI_N:  /* Nucleus */
1670         case ASI_NL: /* Nucleus LE */
1671         case ASI_TWINX_N:
1672         case ASI_TWINX_NL:
1673         case ASI_NUCLEUS_QUAD_LDD:
1674         case ASI_NUCLEUS_QUAD_LDD_L:
1675             if (hypervisor(dc)) {
1676                 mem_idx = MMU_PHYS_IDX;
1677             } else {
1678                 mem_idx = MMU_NUCLEUS_IDX;
1679             }
1680             break;
1681         case ASI_AIUP:  /* As if user primary */
1682         case ASI_AIUPL: /* As if user primary LE */
1683         case ASI_TWINX_AIUP:
1684         case ASI_TWINX_AIUP_L:
1685         case ASI_BLK_AIUP_4V:
1686         case ASI_BLK_AIUP_L_4V:
1687         case ASI_BLK_AIUP:
1688         case ASI_BLK_AIUPL:
1689             mem_idx = MMU_USER_IDX;
1690             break;
1691         case ASI_AIUS:  /* As if user secondary */
1692         case ASI_AIUSL: /* As if user secondary LE */
1693         case ASI_TWINX_AIUS:
1694         case ASI_TWINX_AIUS_L:
1695         case ASI_BLK_AIUS_4V:
1696         case ASI_BLK_AIUS_L_4V:
1697         case ASI_BLK_AIUS:
1698         case ASI_BLK_AIUSL:
1699             mem_idx = MMU_USER_SECONDARY_IDX;
1700             break;
1701         case ASI_S:  /* Secondary */
1702         case ASI_SL: /* Secondary LE */
1703         case ASI_TWINX_S:
1704         case ASI_TWINX_SL:
1705         case ASI_BLK_COMMIT_S:
1706         case ASI_BLK_S:
1707         case ASI_BLK_SL:
1708         case ASI_FL8_S:
1709         case ASI_FL8_SL:
1710         case ASI_FL16_S:
1711         case ASI_FL16_SL:
1712             if (mem_idx == MMU_USER_IDX) {
1713                 mem_idx = MMU_USER_SECONDARY_IDX;
1714             } else if (mem_idx == MMU_KERNEL_IDX) {
1715                 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1716             }
1717             break;
1718         case ASI_P:  /* Primary */
1719         case ASI_PL: /* Primary LE */
1720         case ASI_TWINX_P:
1721         case ASI_TWINX_PL:
1722         case ASI_BLK_COMMIT_P:
1723         case ASI_BLK_P:
1724         case ASI_BLK_PL:
1725         case ASI_FL8_P:
1726         case ASI_FL8_PL:
1727         case ASI_FL16_P:
1728         case ASI_FL16_PL:
1729             break;
1730         }
1731         switch (asi) {
1732         case ASI_REAL:
1733         case ASI_REAL_IO:
1734         case ASI_REAL_L:
1735         case ASI_REAL_IO_L:
1736         case ASI_N:
1737         case ASI_NL:
1738         case ASI_AIUP:
1739         case ASI_AIUPL:
1740         case ASI_AIUS:
1741         case ASI_AIUSL:
1742         case ASI_S:
1743         case ASI_SL:
1744         case ASI_P:
1745         case ASI_PL:
1746             type = GET_ASI_DIRECT;
1747             break;
1748         case ASI_TWINX_REAL:
1749         case ASI_TWINX_REAL_L:
1750         case ASI_TWINX_N:
1751         case ASI_TWINX_NL:
1752         case ASI_TWINX_AIUP:
1753         case ASI_TWINX_AIUP_L:
1754         case ASI_TWINX_AIUS:
1755         case ASI_TWINX_AIUS_L:
1756         case ASI_TWINX_P:
1757         case ASI_TWINX_PL:
1758         case ASI_TWINX_S:
1759         case ASI_TWINX_SL:
1760         case ASI_QUAD_LDD_PHYS:
1761         case ASI_QUAD_LDD_PHYS_L:
1762         case ASI_NUCLEUS_QUAD_LDD:
1763         case ASI_NUCLEUS_QUAD_LDD_L:
1764             type = GET_ASI_DTWINX;
1765             break;
1766         case ASI_BLK_COMMIT_P:
1767         case ASI_BLK_COMMIT_S:
1768         case ASI_BLK_AIUP_4V:
1769         case ASI_BLK_AIUP_L_4V:
1770         case ASI_BLK_AIUP:
1771         case ASI_BLK_AIUPL:
1772         case ASI_BLK_AIUS_4V:
1773         case ASI_BLK_AIUS_L_4V:
1774         case ASI_BLK_AIUS:
1775         case ASI_BLK_AIUSL:
1776         case ASI_BLK_S:
1777         case ASI_BLK_SL:
1778         case ASI_BLK_P:
1779         case ASI_BLK_PL:
1780             type = GET_ASI_BLOCK;
1781             break;
1782         case ASI_FL8_S:
1783         case ASI_FL8_SL:
1784         case ASI_FL8_P:
1785         case ASI_FL8_PL:
1786             memop = MO_UB;
1787             type = GET_ASI_SHORT;
1788             break;
1789         case ASI_FL16_S:
1790         case ASI_FL16_SL:
1791         case ASI_FL16_P:
1792         case ASI_FL16_PL:
1793             memop = MO_TEUW;
1794             type = GET_ASI_SHORT;
1795             break;
1796         }
1797         /* The little-endian asis all have bit 3 set.  */
1798         if (asi & 8) {
1799             memop ^= MO_BSWAP;
1800         }
1801     }
1802 #endif
1803 
1804  done:
1805     return (DisasASI){ type, asi, mem_idx, memop };
1806 }
1807 
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* Link-time stubs: on 32-bit user-only the generic ASI helpers do not
   exist, but the calls below are unreachable (every user-mode ASI path
   resolves to GET_ASI_DIRECT or raises an exception). */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1821 
/* Generate an integer ASI load of DA->memop size from ADDR into DST. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi. */
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The helper returns 64 bits; narrow to target width. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1852 
/* Generate an integer ASI store of DA->memop size from SRC to ADDR. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi. */
        break;

    case GET_ASI_DTWINX: /* Reserved for stda.  */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();
            int i;

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da->mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da->mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception; sync pc/npc first. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The helper takes 64 bits; widen from target width. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1924 
/* SWAPA: atomically exchange SRC with the word at ADDR, old value
   into DST. */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi. */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1941 
/* CASA/CASXA: atomic compare-and-swap at ADDR; old value into OLDV. */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi. */
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1958 
/* LDSTUBA: atomically load the byte at ADDR into DST and store 0xff. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated by resolve_asi. */
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Non-atomic helper sequence: defer to serialized execution. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1993 
/*
 * Emit a floating-point load with explicit ASI (ldfa/lddfa/ldqfa).
 * ORIG_SIZE is the register-operand size from the instruction;
 * ADDR is the effective address and RD the destination fp register.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        /* All sizes require at least 4-byte alignment here.  */
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_dest_fpr_F(dc);
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
            break;

        case MO_128:
            /* Load the low half into a temp first, so that a fault on
               the second access leaves the register pair unmodified.  */
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
            tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only.  */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment.  */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only.  */
        if (orig_size == MO_64) {
            tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
                                memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = gen_dest_fpr_F(dc);
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
                                  r_asi, r_mop);
                break;
            case MO_128:
                /* As for GET_ASI_DIRECT: low half via a temp so that a
                   fault on the second helper call is side-effect free.  */
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr_tmp,
                                  r_asi, r_mop);
                tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2105 
2106 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2107                         TCGv addr, int rd)
2108 {
2109     MemOp memop = da->memop;
2110     MemOp size = memop & MO_SIZE;
2111     TCGv_i32 d32;
2112     TCGv addr_tmp;
2113 
2114     /* TODO: Use 128-bit load/store below. */
2115     if (size == MO_128) {
2116         memop = (memop & ~MO_SIZE) | MO_64;
2117     }
2118 
2119     switch (da->type) {
2120     case GET_ASI_EXCP:
2121         break;
2122 
2123     case GET_ASI_DIRECT:
2124         memop |= MO_ALIGN_4;
2125         switch (size) {
2126         case MO_32:
2127             d32 = gen_load_fpr_F(dc, rd);
2128             tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2129             break;
2130         case MO_64:
2131             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2132                                 memop | MO_ALIGN_4);
2133             break;
2134         case MO_128:
2135             /* Only 4-byte alignment required.  However, it is legal for the
2136                cpu to signal the alignment fault, and the OS trap handler is
2137                required to fix it up.  Requiring 16-byte alignment here avoids
2138                having to probe the second page before performing the first
2139                write.  */
2140             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2141                                 memop | MO_ALIGN_16);
2142             addr_tmp = tcg_temp_new();
2143             tcg_gen_addi_tl(addr_tmp, addr, 8);
2144             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr_tmp, da->mem_idx, memop);
2145             break;
2146         default:
2147             g_assert_not_reached();
2148         }
2149         break;
2150 
2151     case GET_ASI_BLOCK:
2152         /* Valid for stdfa on aligned registers only.  */
2153         if (orig_size == MO_64 && (rd & 7) == 0) {
2154             /* The first operation checks required alignment.  */
2155             addr_tmp = tcg_temp_new();
2156             for (int i = 0; ; ++i) {
2157                 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
2158                                     memop | (i == 0 ? MO_ALIGN_64 : 0));
2159                 if (i == 7) {
2160                     break;
2161                 }
2162                 tcg_gen_addi_tl(addr_tmp, addr, 8);
2163                 addr = addr_tmp;
2164             }
2165         } else {
2166             gen_exception(dc, TT_ILL_INSN);
2167         }
2168         break;
2169 
2170     case GET_ASI_SHORT:
2171         /* Valid for stdfa only.  */
2172         if (orig_size == MO_64) {
2173             tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
2174                                 memop | MO_ALIGN);
2175         } else {
2176             gen_exception(dc, TT_ILL_INSN);
2177         }
2178         break;
2179 
2180     default:
2181         /* According to the table in the UA2011 manual, the only
2182            other asis that are valid for ldfa/lddfa/ldqfa are
2183            the PST* asis, which aren't currently handled.  */
2184         gen_exception(dc, TT_ILL_INSN);
2185         break;
2186     }
2187 }
2188 
/*
 * Emit ldda/lddtwa: load a doubleword (or 128-bit twin on sparc64)
 * from ADDR into the even/odd register pair starting at RD.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    /* Writeback happens once, after any of the loads above may fault.  */
    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2264 
/*
 * Emit stda/stdtwa: store the even/odd register pair starting at RD
 * to ADDR as one doubleword (or 128-bit twin on sparc64).
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /* Store 32 bytes of T64 to ADDR.  */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
        {
            TCGv_i64 t64 = tcg_temp_new_i64();
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            /* Replicate the register pair over four doublewords.  */
            tcg_gen_concat_tl_i64(t64, lo, hi);
            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da->mem_idx, da->memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2356 
/*
 * Emit fmovs<cond>: conditionally move single-precision fp register
 * RS into RD according to *CMP.  Sparc64 only.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the later.  */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        /* c1 already holds a 0/1 comparison result.  */
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? rs : rd  */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2386 
/*
 * Emit fmovd<cond>: conditionally move double-precision fp register
 * RS into RD according to *CMP.  Sparc64 only.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2399 
/*
 * Emit fmovq<cond>: conditionally move quad-precision fp register
 * RS into RD according to *CMP, as two 64-bit conditional moves.
 * Sparc64 only.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    /* The destination quad register was (possibly) written.  */
    gen_update_fprs_dirty(dc, qd);
#else
    qemu_build_not_reached();
#endif
}
2416 
#ifdef TARGET_SPARC64
/*
 * Compute a host pointer to the trap_state entry for the current
 * trap level, i.e. &env->ts[env->tl & MAXTL_MASK], into R_TSPTR.
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2440 
/* Decode-tree field translators: map an encoded fp register number to
   the canonical double/quad register index.  */
static int extract_dfpreg(DisasContext *dc, int x)
{
    return DFPREG(x);
}

static int extract_qfpreg(DisasContext *dc, int x)
{
    return QFPREG(x);
}
2450 
2451 /* Include the auto-generated decoder.  */
2452 #include "decode-insns.c.inc"
2453 
/*
 * Define a decodetree trans_* entry point: the insn is accepted only
 * when the avail_<AVAIL> predicate holds for this cpu, after which
 * FUNC performs the actual translation.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Availability predicates used by TRANS; constant-folded per target,
   with cpu feature bits consulted where the target permits both.  */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
#endif
2484 
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    /* A low-bits-set npc is a symbolic marker, not a real address.  */
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime; emit code to advance it.  */
            dc->pc = dc->npc;
            gen_op_next_insn();
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: step the pair forward.  */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2509 
2510 /*
2511  * Major opcodes 00 and 01 -- branches, call, and sethi
2512  */
2513 
2514 static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
2515 {
2516     if (annul) {
2517         dc->pc = dc->npc + 4;
2518         dc->npc = dc->pc + 4;
2519     } else {
2520         dc->pc = dc->npc;
2521         dc->npc = dc->pc + 4;
2522     }
2523     return true;
2524 }
2525 
/*
 * Branch-always ("ba"): with the annul bit the delay slot is skipped
 * and control goes straight to DEST; otherwise the delay slot runs
 * first and DEST becomes the next npc.
 */
static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
                                       target_ulong dest)
{
    if (annul) {
        dc->pc = dest;
        dc->npc = dest + 4;
    } else {
        dc->pc = dc->npc;
        /* The current npc may be dynamic; sync the runtime pc from it
           before retargeting npc to the branch destination.  */
        dc->npc = dest;
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
    }
    return true;
}
2539 
/*
 * Conditional branch to DEST with comparison *CMP.  With annul, both
 * outcomes are resolved immediately via goto_tb; otherwise resolution
 * is deferred by recording the two candidate targets (JUMP_PC) or by
 * emitting a movcond on a dynamic npc.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        /* Taken: delay slot executes, then DEST.
           Not taken: delay slot is annulled, skip to npc + 4.  */
        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* npc only known at runtime: select the new npc with
                   a conditional move on the comparison result.  */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, cmp->c2,
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static npc: record both candidates and latch the
               condition into cpu_cond for the delay-slot insn.  */
            dc->pc = npc;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;
            dc->npc = JUMP_PC;
            if (cmp->is_bool) {
                tcg_gen_mov_tl(cpu_cond, cmp->c1);
            } else {
                tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
        }
    }
    return true;
}
2583 
/* Raise a privileged-instruction trap; always "handles" the insn.  */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

/* Raise fp_exception with ftt = unimplemented_FPop.  */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2595 
2596 static bool gen_trap_float128(DisasContext *dc)
2597 {
2598     if (dc->def->features & CPU_FEATURE_FLOAT128) {
2599         return false;
2600     }
2601     return raise_unimpfpop(dc);
2602 }
2603 
/* Translate Bicc/BPcc: integer condition-code branches.  */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    /* a->i is the displacement in instruction words.  */
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:
        /* bn: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        /* ba: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc,  64, do_bpcc, a)
2624 
/* Translate FBfcc/FBPfcc: floating-point condition-code branches.  */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    /* All fp branches trap when the FPU is disabled.  */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:
        /* fbn: branch never */
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        /* fba: branch always */
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        flush_cond(dc);

        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc,  64, do_fbpfcc, a)
TRANS(FBfcc,  ALL, do_fbpfcc, a)
2648 
/* Translate BPr: sparc64 branch on integer register contents.  */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Reserved cond encodings decode as illegal instruction.  */
    if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
        return false;
    }

    flush_cond(dc);
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return advance_jump_cond(dc, &cmp, a->a, target);
}
2665 
/* Translate CALL: save return address in %o7 and jump.  */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    /* %o7 (r15) receives the address of the call itself.  */
    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2675 
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    /* Returning false lets the decoder fall through to illegal-insn.  */
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2689 
2690 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2691 {
2692     /* Special-case %g0 because that's the canonical nop.  */
2693     if (a->rd) {
2694         gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2695     }
2696     return advance_pc(dc);
2697 }
2698 
2699 /*
2700  * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2701  */
2702 
/*
 * Translate Ticc: conditional software trap.  The trap number is
 * (rs1 + rs2_or_imm) masked to the implementation's range, offset
 * into the TT_TRAP vector.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use a wider mask.  */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Compute (rs1 + rs2_or_imm) & mask at runtime.  */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Exception taken out of line, at the end of the TB.  */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2753 
/* Ticc with register operand; the cc field must be 0 on sparc32.  */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Ticc with the v7/v8 immediate format (no cc field).  */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Ticc with the v9 immediate format (explicit cc field).  */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2777 
/* Translate STBAR: store-store memory barrier.  */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

/* Translate MEMBAR (sparc64 only).  */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2799 
/*
 * Common shape for "read special register" insns: raise a privileged
 * trap unless PRIV, otherwise store FUNC's result into RD.  FUNC may
 * either fill and return the provided destination temp, or return a
 * different TCGv holding the value.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2809 
/* Read the %y multiply/divide register.  */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

/* Read the Leon3 %asr17 configuration register.  */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8]   V8   */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2843 
/* Read %ccr; the lazily-evaluated flags must be folded in first.  */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* Read %asi; the value is known at translation time.  */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2863 
/* Read %tick from the tick timer device.  */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    /* Timer access is an I/O operation; end the TB if icount demands.  */
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* Read %pc; known statically during translation.  */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2886 
/* Read %fprs (fp register state).  */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* Read %gsr; traps instead when the FPU is disabled.  */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* Read %softint.  */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2910 
2911 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2912 {
2913     tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2914     return dst;
2915 }
2916 
2917 /* TODO: non-priv access only allowed when enabled. */
2918 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2919 
2920 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2921 {
2922     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2923 
2924     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2925     if (translator_io_start(&dc->base)) {
2926         dc->base.is_jmp = DISAS_EXIT;
2927     }
2928     gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2929                               tcg_constant_i32(dc->mem_idx));
2930     return dst;
2931 }
2932 
2933 /* TODO: non-priv access only allowed when enabled. */
2934 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
2935 
/* Read %stick_cmpr directly from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2944 
2945 /*
2946  * UltraSPARC-T1 Strand status.
2947  * HYPV check maybe not enough, UA2005 & UA2007 describe
2948  * this ASR as impl. dep
2949  */
2950 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2951 {
2952     return tcg_constant_tl(1);
2953 }
2954 
2955 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
2956 
/* Read %psr (sparc32): sync cached state via update_psr, then use helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    update_psr(dc);
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2965 
/* Read hyperprivileged %hpstate. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
2973 
2974 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
2975 {
2976     TCGv_i32 tl = tcg_temp_new_i32();
2977     TCGv_ptr tp = tcg_temp_new_ptr();
2978 
2979     tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
2980     tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
2981     tcg_gen_shli_i32(tl, tl, 3);
2982     tcg_gen_ext_i32_ptr(tp, tl);
2983     tcg_gen_add_ptr(tp, tp, tcg_env);
2984 
2985     tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
2986     return dst;
2987 }
2988 
2989 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2990 
/* Read hyperprivileged %hintp. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
2998 
/* Read hyperprivileged trap base address %htba. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3006 
/* Read hyperprivileged version register %hver. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3014 
/* Read %hstick_cmpr directly from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3023 
/* Read %wim (sparc32 window invalid mask). */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3031 
3032 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3033 {
3034 #ifdef TARGET_SPARC64
3035     TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3036 
3037     gen_load_trap_state_at_tl(r_tsptr);
3038     tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3039     return dst;
3040 #else
3041     qemu_build_not_reached();
3042 #endif
3043 }
3044 
3045 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3046 
/* Read %tnpc from the trap_state entry for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3061 
/* Read %tstate from the trap_state entry for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3076 
/* Read %tt (trap type) from the trap_state entry, sign-extended. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3092 
/* %tbr (sparc32) and %tba (sparc64) share the cpu_tbr global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3100 
/* Read %pstate, sign-extended from its 32-bit env field. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3108 
/* Read %tl (trap level). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3116 
/* Read %pil (processor interrupt level). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3124 
/* Read %cwp (current window pointer) via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3132 
/* Read %cansave. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3140 
/* Read %canrestore. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)
3149 
/* Read %cleanwin. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3157 
/* Read %otherwin. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3165 
/* Read %wstate. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3173 
/* Read %gl (global register level, UA2005). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3181 
/* UA2005 strand status register. */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3190 
/* Read %ver (version register). */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3198 
3199 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3200 {
3201     if (avail_64(dc)) {
3202         gen_helper_flushw(tcg_env);
3203         return advance_pc(dc);
3204     }
3205     return false;
3206 }
3207 
/*
 * Common body for WRASR/WRPR-style insns: check privilege, compute
 * src = rs1 ^ (rs2 or simm), and hand the value to FUNC to store.
 * Returns false to reject an invalid encoding.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    /* With %g0 as rs1, the xor reduces to the constant operand. */
    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: pass rs1 through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3239 
/* Write %y: only the low 32 bits are kept. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3246 
/* Write %ccr via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3253 
3254 static void do_wrasi(DisasContext *dc, TCGv src)
3255 {
3256     TCGv tmp = tcg_temp_new();
3257 
3258     tcg_gen_ext8u_tl(tmp, src);
3259     tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3260     /* End TB to notice changed ASI. */
3261     dc->base.is_jmp = DISAS_EXIT;
3262 }
3263 
3264 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3265 
/* Write %fprs; end the TB so the new FPU-enable state takes effect. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    /* Force re-evaluation of the fprs-dirty tracking. */
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3278 
/* Write %gsr; generate the no-FPU trap check first. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3286 
/* Set bits in %softint via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3293 
/* Clear bits in %softint via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3300 
/* Replace %softint entirely via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3307 
3308 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3309 {
3310     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3311 
3312     tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3313     tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3314     translator_io_start(&dc->base);
3315     gen_helper_tick_set_limit(r_tickptr, src);
3316     /* End TB to handle timer interrupt */
3317     dc->base.is_jmp = DISAS_EXIT;
3318 }
3319 
3320 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3321 
3322 static void do_wrstick(DisasContext *dc, TCGv src)
3323 {
3324 #ifdef TARGET_SPARC64
3325     TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3326 
3327     tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3328     translator_io_start(&dc->base);
3329     gen_helper_tick_set_count(r_tickptr, src);
3330     /* End TB to handle timer interrupt */
3331     dc->base.is_jmp = DISAS_EXIT;
3332 #else
3333     qemu_build_not_reached();
3334 #endif
3335 }
3336 
3337 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3338 
/* Write %stick_cmpr and re-arm the system tick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3352 
/* WRPOWERDOWN: save pc/npc, then halt the cpu via helper. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3360 
/* Write %psr (sparc32): helper does the work; flags become CC_OP_FLAGS. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    /* End TB: PSR write can change privilege/interrupt state. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3370 
3371 static void do_wrwim(DisasContext *dc, TCGv src)
3372 {
3373     target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3374     TCGv tmp = tcg_temp_new();
3375 
3376     tcg_gen_andi_tl(tmp, src, mask);
3377     tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3378 }
3379 
3380 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3381 
/* Write %tpc into the trap_state entry for the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3395 
/* Write %tnpc into the trap_state entry for the current trap level. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3409 
/* Write %tstate into the trap_state entry for the current trap level. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3423 
/* Write %tt (trap type, 32-bit) into the current trap_state entry. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3437 
/* Write %tick: reset the tick counter via helper. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3450 
/* Write %tba (shared storage with sparc32 %tbr). */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3457 
/* Write %pstate via helper; pc/npc saved first, npc becomes dynamic. */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3469 
/* Write %tl; pc/npc saved first, npc becomes dynamic. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3478 
/* Write %pil via helper (may unmask a pending interrupt). */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3488 
/* Write %cwp via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3495 
/* Write %cansave. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3502 
/* Write %canrestore. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3509 
/* Write %cleanwin. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3516 
/* Write %otherwin. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3523 
/* Write %wstate. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3530 
/* Write %gl via helper (swaps the active global register set). */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3537 
/* UA2005 strand status register. */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 %tbr shares storage with %tba (see do_wrtba). */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3547 
/* Write %hpstate; end TB since hyperprivilege state changed. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3555 
/* Write %htstate for the current trap level (indexed as in do_rdhtstate). */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Compute &env->htstate[tl]: each entry is 8 bytes (<< 3). */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3571 
/* Write hyperprivileged %hintp. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3578 
/* Write hyperprivileged trap base address %htba. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3585 
/* Write %hstick_cmpr and re-arm the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3600 
/* SAVED/RESTORED: window bookkeeping after a spill/fill trap handler. */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3616 
/* NOP: nothing to emit beyond advancing the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3628 
/*
 * Common body for two-operand arithmetic/logic insns (reg-reg or reg-imm).
 * If LOGIC_CC, the result is written into cpu_cc_N and the other flags are
 * set from it directly; otherwise CC_OP records how flags are recomputed
 * lazily.  Returns false to reject an invalid encoding.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* For logic ops, the result doubles as the N flag. */
        dst = cpu_cc_N;
    } else if (a->cc && cc_op > CC_OP_FLAGS) {
        dst = cpu_cc_dst;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Mirror into the 32-bit icc flags as well. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
        dc->cc_op = cc_op;
    }
    return advance_pc(dc);
}
3678 
/* Dispatch to the cc-setting or plain variant based on the insn's cc bit. */
static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        assert(cc_op >= 0);
        return do_arith_int(dc, a, cc_op, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, cc_op, func, funci, false);
}
3690 
/* Logic ops set flags directly from the result, so pass cc as logic_cc. */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_FLAGS, func, funci, a->cc);
}
3697 
TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

/* Tagged add/subtract (with and without trap-on-overflow). */
TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)

TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
3725 
/* OR, with a fast path for its MOV alias. */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov simm13 or mov %g0: store the constant directly. */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3742 
/* ADDC/ADDX: pick the carry-extraction flavor from how flags are cached. */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
        /* Carry is known to be zero.  Fall back to plain ADD.  */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* Unknown cc state: extract carry the slow, generic way. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}
3765 
/* SUBC/SUBX: mirror of trans_ADDC for subtract-with-carry. */
static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
        /* Carry is known to be zero.  Fall back to plain SUB.  */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        /* Unknown cc state: extract carry the slow, generic way. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}
3788 
/* MULScc: multiply step; needs the full PSR state synced first. */
static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    update_psr(dc);
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
3794 
3795 static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
3796                      int width, bool cc, bool left)
3797 {
3798     TCGv dst, s1, s2, lo1, lo2;
3799     uint64_t amask, tabl, tabr;
3800     int shift, imask, omask;
3801 
3802     dst = gen_dest_gpr(dc, a->rd);
3803     s1 = gen_load_gpr(dc, a->rs1);
3804     s2 = gen_load_gpr(dc, a->rs2);
3805 
3806     if (cc) {
3807         tcg_gen_mov_tl(cpu_cc_src, s1);
3808         tcg_gen_mov_tl(cpu_cc_src2, s2);
3809         tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
3810         tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
3811         dc->cc_op = CC_OP_SUB;
3812     }
3813 
3814     /*
3815      * Theory of operation: there are two tables, left and right (not to
3816      * be confused with the left and right versions of the opcode).  These
3817      * are indexed by the low 3 bits of the inputs.  To make things "easy",
3818      * these tables are loaded into two constants, TABL and TABR below.
3819      * The operation index = (input & imask) << shift calculates the index
3820      * into the constant, while val = (table >> index) & omask calculates
3821      * the value we're looking for.
3822      */
3823     switch (width) {
3824     case 8:
3825         imask = 0x7;
3826         shift = 3;
3827         omask = 0xff;
3828         if (left) {
3829             tabl = 0x80c0e0f0f8fcfeffULL;
3830             tabr = 0xff7f3f1f0f070301ULL;
3831         } else {
3832             tabl = 0x0103070f1f3f7fffULL;
3833             tabr = 0xfffefcf8f0e0c080ULL;
3834         }
3835         break;
3836     case 16:
3837         imask = 0x6;
3838         shift = 1;
3839         omask = 0xf;
3840         if (left) {
3841             tabl = 0x8cef;
3842             tabr = 0xf731;
3843         } else {
3844             tabl = 0x137f;
3845             tabr = 0xfec8;
3846         }
3847         break;
3848     case 32:
3849         imask = 0x4;
3850         shift = 0;
3851         omask = 0x3;
3852         if (left) {
3853             tabl = (2 << 2) | 3;
3854             tabr = (3 << 2) | 1;
3855         } else {
3856             tabl = (1 << 2) | 3;
3857             tabr = (3 << 2) | 2;
3858         }
3859         break;
3860     default:
3861         abort();
3862     }
3863 
3864     lo1 = tcg_temp_new();
3865     lo2 = tcg_temp_new();
3866     tcg_gen_andi_tl(lo1, s1, imask);
3867     tcg_gen_andi_tl(lo2, s2, imask);
3868     tcg_gen_shli_tl(lo1, lo1, shift);
3869     tcg_gen_shli_tl(lo2, lo2, shift);
3870 
3871     tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
3872     tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
3873     tcg_gen_andi_tl(lo1, lo1, omask);
3874     tcg_gen_andi_tl(lo2, lo2, omask);
3875 
3876     amask = address_mask_i(dc, -8);
3877     tcg_gen_andi_tl(s1, s1, amask);
3878     tcg_gen_andi_tl(s2, s2, amask);
3879 
3880     /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
3881     tcg_gen_and_tl(lo2, lo2, lo1);
3882     tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
3883 
3884     gen_store_gpr(dc, a->rd, dst);
3885     return advance_pc(dc);
3886 }
3887 
3888 TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
3889 TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
3890 TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
3891 TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
3892 TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
3893 TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)
3894 
3895 TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
3896 TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
3897 TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
3898 TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
3899 TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
3900 TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3901 
3902 static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
3903                    void (*func)(TCGv, TCGv, TCGv))
3904 {
3905     TCGv dst = gen_dest_gpr(dc, a->rd);
3906     TCGv src1 = gen_load_gpr(dc, a->rs1);
3907     TCGv src2 = gen_load_gpr(dc, a->rs2);
3908 
3909     func(dst, src1, src2);
3910     gen_store_gpr(dc, a->rd, dst);
3911     return advance_pc(dc);
3912 }
3913 
/* VIS array-addressing ops, three gpr operands via do_rrr. */
TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
3917 
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7, and the discarded low three
 * bits of the sum are recorded in GSR bits [2:0] (the align field).
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* GSR.align = sum & 7 -- deposit takes only the low 3 bits. */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/* ALIGNADDRESS_LITTLE: as above, but GSR.align = (-sum) & 7. */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
3947 
/*
 * BMASK: dst = s1 + s2, and the low 32 bits of the sum are also
 * deposited into GSR bits [63:32] (the mask field used by BSHUFFLE).
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
3959 
/*
 * Shift by register count: l selects left shift, u selects unsigned
 * (logical) right shift; a->x selects the 64-bit form.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* Only the low 6 (64-bit) or 5 (32-bit) bits of the count apply. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* Drop any bits shifted into the high half. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so the logical shift sees 32 bits. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first so the arithmetic shift sees 32 bits. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
3999 
/*
 * Shift by immediate count: l selects left shift, u selects unsigned
 * (logical) right shift; a->x selects the 64-bit form.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Shift width matches the register width: plain shifts. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit register: fold the shift and the
         * required 32-bit extension into one deposit/extract op.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4036 
4037 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4038 {
4039     /* For simplicity, we under-decoded the rs2 form. */
4040     if (!imm && rs2_or_imm & ~0x1f) {
4041         return NULL;
4042     }
4043     if (imm || rs2_or_imm == 0) {
4044         return tcg_constant_tl(rs2_or_imm);
4045     } else {
4046         return cpu_regs[rs2_or_imm];
4047     }
4048 }
4049 
/*
 * Conditional move: rd = cmp ? src2 : rd.  The old value of rd is
 * loaded first so the movcond can preserve it on a false condition.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4058 
/* MOVcc: conditionally move rs2/imm into rd on an integer condition. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVfcc: as MOVcc, but on a floating-point condition code. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVR: as MOVcc, but the condition tests the value of register rs1. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
4094 
/*
 * Compute rs1 + (simm13 or rs2) into a fresh temporary and hand the
 * sum to func (used by JMPL, RETT, RETURN, SAVE, RESTORE).
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4119 
/* JMPL: jump to src, writing the address of this insn into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4140 
/*
 * sparc32 RETT: privileged return from trap.  Jumps to src; the
 * helper restores the pre-trap processor state.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4158 
/*
 * sparc64 RETURN: jump to src like JMPL (without the rd writeback)
 * combined with a RESTORE of the register window.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4173 
/*
 * SAVE: advance the register window, then write the pre-computed sum
 * into rd of the *new* window (hence the helper runs first).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

/* RESTORE: as SAVE, but rolling the register window back. */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4191 
/*
 * sparc64 DONE/RETRY: privileged return from a trap handler.  The new
 * pc/npc come from the trap state inside the helper, so the TB ends
 * with both marked dynamic.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* NOTE(review): io_start before the helper -- presumably because the
       helper can touch timing-sensitive state; confirm against icount. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4210 
4211 /*
4212  * Major opcode 11 -- load and store instructions
4213  */
4214 
/*
 * Compute the effective address rs1 + (simm13 or rs2) for a load or
 * store.  Returns NULL for a mis-decoded rs2 form.  The result is
 * truncated to 32 bits when the address mask is in effect.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* 32-bit address masking; reuse tmp if one was allocated. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4243 
/* Integer load of size/sign mop from [rs1 + rs2/imm] into rd. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4267 
/* Integer store of size mop from rd to [rs1 + rs2/imm]. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4287 
/* LDD: load a doubleword into the even/odd register pair rd, rd+1. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must name the even register of the pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

/* STD: store the even/odd register pair rd, rd+1 as a doubleword. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    /* rd must name the even register of the pair. */
    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4321 
/*
 * LDSTUB: atomic load-store unsigned byte.  The old byte lands in rd;
 * the memory update itself is emitted by gen_ldstub_asi.
 */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4338 
/* SWAP: atomically exchange the 32-bit register rd with memory. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* dst receives the old memory value; src supplies the new one. */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4356 
/*
 * CASA/CASXA: compare-and-swap.  c (from rs2) is compared against
 * memory; n (from rd) is the value conditionally stored; o receives
 * the old memory value and is written back to rd.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    /* CAS takes no displacement: the address is rs1 alone. */
    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4378 
/* Floating-point load of size sz (32/64/128 bits) into fp register rd. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad loads additionally require float128 support. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4406 
/* Floating-point store of size sz (32/64/128 bits) from fp register rd. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad stores additionally require float128 support. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4433 
/*
 * sparc32-only STDFQ (store double from fp queue): privileged.  This
 * implementation always raises an fp exception with FTT set to
 * sequence_error rather than performing a store.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4448 
/*
 * LDFSR/LDXFSR: load a new FSR value from memory.  Bits in new_mask
 * are taken from memory, bits in old_mask are preserved from the
 * current FSR, and the merged value is pushed to the fpu state via
 * the helper.
 */
static bool do_ldfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop,
                     target_ulong new_mask, target_ulong old_mask)
{
    TCGv tmp, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tmp = tcg_temp_new();
    tcg_gen_qemu_ld_tl(tmp, addr, dc->mem_idx, mop | MO_ALIGN);
    tcg_gen_andi_tl(tmp, tmp, new_mask);
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, old_mask);
    tcg_gen_or_tl(cpu_fsr, cpu_fsr, tmp);
    gen_helper_set_fsr(tcg_env, cpu_fsr);
    return advance_pc(dc);
}

TRANS(LDFSR, ALL, do_ldfsr, a, MO_TEUL, FSR_LDFSR_MASK, FSR_LDFSR_OLDMASK)
TRANS(LDXFSR, 64, do_ldfsr, a, MO_TEUQ, FSR_LDXFSR_MASK, FSR_LDXFSR_OLDMASK)
4470 
/* STFSR/STXFSR: store the current FSR to memory (32 or 64 bits). */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    tcg_gen_qemu_st_tl(cpu_fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4486 
4487 static bool do_fc(DisasContext *dc, int rd, bool c)
4488 {
4489     uint64_t mask;
4490 
4491     if (gen_trap_ifnofpu(dc)) {
4492         return true;
4493     }
4494 
4495     if (rd & 1) {
4496         mask = MAKE_64BIT_MASK(0, 32);
4497     } else {
4498         mask = MAKE_64BIT_MASK(32, 32);
4499     }
4500     if (c) {
4501         tcg_gen_ori_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], mask);
4502     } else {
4503         tcg_gen_andi_i64(cpu_fpr[rd / 2], cpu_fpr[rd / 2], ~mask);
4504     }
4505     gen_update_fprs_dirty(dc, rd);
4506     return advance_pc(dc);
4507 }
4508 
TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, 1)

/* FZEROd/FONEd: fill double-precision register rd with constant c. */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tcg_gen_movi_i64(cpu_fpr[rd / 2], c);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4525 
/* Single-precision unary op with no fp exceptions: rd = func(rs). */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4546 
/* Single-result op on a double source: rd(F) = func(rs(D)). */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4566 
/*
 * Single-precision unary op that may raise IEEE exceptions:
 * clear the accrued-exception/FTT state, run the helper, then check
 * for a pending IEEE trap before writing the result.
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4587 
/* Conversion D -> F with IEEE exception check: rd(F) = func(rs(D)). */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_F(dc);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4610 
/* Double-precision unary op with no fp exceptions: rd = func(rs). */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4632 
/* Double-precision unary op with IEEE exception check. */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4654 
/* Conversion F -> D with IEEE exception check: rd(D) = func(rs(F)). */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    dst = gen_dest_fpr_D(dc, a->rd);
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4677 
/*
 * sparc64 FMOVq: copy a 128-bit register, i.e. both 64-bit halves of
 * the QFPREG-aligned pair, with no fp computation.
 */
static bool trans_FMOVq(DisasContext *dc, arg_FMOVq *a)
{
    int rd, rs;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    rd = QFPREG(a->rd);
    rs = QFPREG(a->rs);
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
    return advance_pc(dc);
}
4700 
/*
 * Quad-precision unary op with no IEEE check, staged through the
 * QT0/QT1 temporaries: QT1 = rs; func(env); rd = QT0.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FNEGq, 64, do_qq, a, gen_helper_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_helper_fabsq)
4721 
/* As do_qq, but with the IEEE exception check after the helper. */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4742 
/* Conversion Q -> F with IEEE check: rd(F) = func(QT1 = rs). */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env))
{
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_F(dc);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4766 
/* Conversion Q -> D with IEEE check: rd(D) = func(QT1 = rs). */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env))
{
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT1(QFPREG(a->rs));
    dst = gen_dest_fpr_D(dc, a->rd);
    func(dst, tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4790 
/* Conversion F -> Q: func writes QT0, which is stored to quad rd. */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i32))
{
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_F(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4813 
/* Conversion D -> Q: func writes QT0, which is stored to quad rd. */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_env, TCGv_i64))
{
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src = gen_load_fpr_D(dc, a->rs);
    func(tcg_env, src);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4836 
/*
 * 32-bit packed/logical three-operand fp op with no fp exceptions:
 * rd = func(rs1, rs2), computed in place in src1.
 */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)
4865 
/* Single-precision arithmetic op with IEEE exception check. */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
4888 
/*
 * 64-bit three-operand fp/VIS op with no fp exceptions:
 * rd = func(rs1, rs2).
 */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_fpr_D(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_ddd, a, gen_helper_fmul8x16)
TRANS(FMUL8x16AU, VIS1, do_ddd, a, gen_helper_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_ddd, a, gen_helper_fmul8x16al)
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)
TRANS(FMULD8SUx16, VIS1, do_ddd, a, gen_helper_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_ddd, a, gen_helper_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_ddd, a, gen_helper_fpmerge)
TRANS(FEXPAND, VIS1, do_ddd, a, gen_helper_fexpand)

TRANS(FPADD16, VIS1, do_ddd, a, tcg_gen_vec_add16_i64)
TRANS(FPADD32, VIS1, do_ddd, a, tcg_gen_vec_add32_i64)
TRANS(FPSUB16, VIS1, do_ddd, a, tcg_gen_vec_sub16_i64)
TRANS(FPSUB32, VIS1, do_ddd, a, tcg_gen_vec_sub32_i64)
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)
4932 
/*
 * VIS pixel-compare ops: integer rd = func(double rs1, double rs2),
 * producing a per-element comparison mask in a gpr.
 */
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
4960 
4961 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
4962                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
4963 {
4964     TCGv_i64 dst, src1, src2;
4965 
4966     if (gen_trap_ifnofpu(dc)) {
4967         return true;
4968     }
4969 
4970     gen_op_clear_ieee_excp_and_FTT();
4971     dst = gen_dest_fpr_D(dc, a->rd);
4972     src1 = gen_load_fpr_D(dc, a->rs1);
4973     src2 = gen_load_fpr_D(dc, a->rs2);
4974     func(dst, tcg_env, src1, src2);
4975     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
4976     gen_store_fpr_D(dc, a->rd, dst);
4977     return advance_pc(dc);
4978 }
4979 
/* Double-precision arithmetic, available on all CPU models. */
TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
4984 
4985 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
4986 {
4987     TCGv_i64 dst;
4988     TCGv_i32 src1, src2;
4989 
4990     if (gen_trap_ifnofpu(dc)) {
4991         return true;
4992     }
4993     if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
4994         return raise_unimpfpop(dc);
4995     }
4996 
4997     gen_op_clear_ieee_excp_and_FTT();
4998     dst = gen_dest_fpr_D(dc, a->rd);
4999     src1 = gen_load_fpr_F(dc, a->rs1);
5000     src2 = gen_load_fpr_F(dc, a->rs2);
5001     gen_helper_fsmuld(dst, tcg_env, src1, src2);
5002     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5003     gen_store_fpr_D(dc, a->rd, dst);
5004     return advance_pc(dc);
5005 }
5006 
5007 static bool do_dddd(DisasContext *dc, arg_r_r_r *a,
5008                     void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5009 {
5010     TCGv_i64 dst, src0, src1, src2;
5011 
5012     if (gen_trap_ifnofpu(dc)) {
5013         return true;
5014     }
5015 
5016     dst  = gen_dest_fpr_D(dc, a->rd);
5017     src0 = gen_load_fpr_D(dc, a->rd);
5018     src1 = gen_load_fpr_D(dc, a->rs1);
5019     src2 = gen_load_fpr_D(dc, a->rs2);
5020     func(dst, src0, src1, src2);
5021     gen_store_fpr_D(dc, a->rd, dst);
5022     return advance_pc(dc);
5023 }
5024 
/* PDIST reads rd as an accumulator input; see do_dddd above. */
TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
5026 
/*
 * Quad-precision arithmetic via cpu_env.  The 128-bit operands are
 * not passed as TCG values: they are staged through the QT0/QT1
 * temporaries inside the env, and the result is read back from QT0.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_env))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_op_load_fpr_QT0(QFPREG(a->rs1));
    gen_op_load_fpr_QT1(QFPREG(a->rs2));
    func(tcg_env);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
    gen_op_store_QT0_fpr(QFPREG(a->rd));
    /* The destination was written behind TCG's back; mark it dirty. */
    gen_update_fprs_dirty(dc, QFPREG(a->rd));
    return advance_pc(dc);
}
5046 
/* Quad-precision arithmetic, routed through the QT0/QT1 env temporaries. */
TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5051 
5052 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5053 {
5054     TCGv_i64 src1, src2;
5055 
5056     if (gen_trap_ifnofpu(dc)) {
5057         return true;
5058     }
5059     if (gen_trap_float128(dc)) {
5060         return true;
5061     }
5062 
5063     gen_op_clear_ieee_excp_and_FTT();
5064     src1 = gen_load_fpr_D(dc, a->rs1);
5065     src2 = gen_load_fpr_D(dc, a->rs2);
5066     gen_helper_fdmulq(tcg_env, src1, src2);
5067     gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);
5068     gen_op_store_QT0_fpr(QFPREG(a->rd));
5069     gen_update_fprs_dirty(dc, QFPREG(a->rd));
5070     return advance_pc(dc);
5071 }
5072 
5073 static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
5074                      void (*func)(DisasContext *, DisasCompare *, int, int))
5075 {
5076     DisasCompare cmp;
5077 
5078     if (gen_trap_ifnofpu(dc)) {
5079         return true;
5080     }
5081     if (is_128 && gen_trap_float128(dc)) {
5082         return true;
5083     }
5084 
5085     gen_op_clear_ieee_excp_and_FTT();
5086     gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
5087     func(dc, &cmp, a->rd, a->rs2);
5088     return advance_pc(dc);
5089 }
5090 
/* FMOVR variants by operand width; v9-only. */
TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5094 
5095 static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
5096                       void (*func)(DisasContext *, DisasCompare *, int, int))
5097 {
5098     DisasCompare cmp;
5099 
5100     if (gen_trap_ifnofpu(dc)) {
5101         return true;
5102     }
5103     if (is_128 && gen_trap_float128(dc)) {
5104         return true;
5105     }
5106 
5107     gen_op_clear_ieee_excp_and_FTT();
5108     gen_compare(&cmp, a->cc, a->cond, dc);
5109     func(dc, &cmp, a->rd, a->rs2);
5110     return advance_pc(dc);
5111 }
5112 
/* FMOVcc variants by operand width; v9-only. */
TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5116 
5117 static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
5118                        void (*func)(DisasContext *, DisasCompare *, int, int))
5119 {
5120     DisasCompare cmp;
5121 
5122     if (gen_trap_ifnofpu(dc)) {
5123         return true;
5124     }
5125     if (is_128 && gen_trap_float128(dc)) {
5126         return true;
5127     }
5128 
5129     gen_op_clear_ieee_excp_and_FTT();
5130     gen_fcompare(&cmp, a->cc, a->cond);
5131     func(dc, &cmp, a->rd, a->rs2);
5132     return advance_pc(dc);
5133 }
5134 
/* FMOVfcc variants by operand width; v9-only. */
TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5138 
5139 static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
5140 {
5141     TCGv_i32 src1, src2;
5142 
5143     if (avail_32(dc) && a->cc != 0) {
5144         return false;
5145     }
5146     if (gen_trap_ifnofpu(dc)) {
5147         return true;
5148     }
5149 
5150     gen_op_clear_ieee_excp_and_FTT();
5151     src1 = gen_load_fpr_F(dc, a->rs1);
5152     src2 = gen_load_fpr_F(dc, a->rs2);
5153     if (e) {
5154         gen_op_fcmpes(a->cc, src1, src2);
5155     } else {
5156         gen_op_fcmps(a->cc, src1, src2);
5157     }
5158     return advance_pc(dc);
5159 }
5160 
/* Quiet and signaling single-precision compares. */
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5163 
5164 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5165 {
5166     TCGv_i64 src1, src2;
5167 
5168     if (avail_32(dc) && a->cc != 0) {
5169         return false;
5170     }
5171     if (gen_trap_ifnofpu(dc)) {
5172         return true;
5173     }
5174 
5175     gen_op_clear_ieee_excp_and_FTT();
5176     src1 = gen_load_fpr_D(dc, a->rs1);
5177     src2 = gen_load_fpr_D(dc, a->rs2);
5178     if (e) {
5179         gen_op_fcmped(a->cc, src1, src2);
5180     } else {
5181         gen_op_fcmpd(a->cc, src1, src2);
5182     }
5183     return advance_pc(dc);
5184 }
5185 
/* Quiet and signaling double-precision compares. */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5188 
5189 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5190 {
5191     if (avail_32(dc) && a->cc != 0) {
5192         return false;
5193     }
5194     if (gen_trap_ifnofpu(dc)) {
5195         return true;
5196     }
5197     if (gen_trap_float128(dc)) {
5198         return true;
5199     }
5200 
5201     gen_op_clear_ieee_excp_and_FTT();
5202     gen_op_load_fpr_QT0(QFPREG(a->rs1));
5203     gen_op_load_fpr_QT1(QFPREG(a->rs2));
5204     if (e) {
5205         gen_op_fcmpeq(a->cc);
5206     } else {
5207         gen_op_fcmpq(a->cc);
5208     }
5209     return advance_pc(dc);
5210 }
5211 
/* Quiet and signaling quad-precision compares. */
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5214 
/*
 * Translator hook: initialize the DisasContext for a new TB by
 * unpacking the state encoded in the TB's flags and cs_base.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* The next-PC (delay slot target) is carried in the TB's cs_base. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* Default ASI for alternate-space accesses, stashed in the flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5245 
/* Translator hook: no per-TB prologue is needed for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5249 
/*
 * Translator hook: record (pc, npc) for this insn so that
 * sparc_restore_state_to_opc() can reconstruct them if an exception
 * is taken mid-TB.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    /* Low bits set mark the symbolic npc values, not real addresses. */
    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* Unresolved conditional branch: encode the taken target. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic flavors restore identically. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5271 
5272 static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
5273 {
5274     DisasContext *dc = container_of(dcbase, DisasContext, base);
5275     CPUSPARCState *env = cpu_env(cs);
5276     unsigned int insn;
5277 
5278     insn = translator_ldl(env, &dc->base, dc->pc);
5279     dc->base.pc_next += 4;
5280 
5281     if (!decode(dc, insn)) {
5282         gen_exception(dc, TT_ILL_INSN);
5283     }
5284 
5285     if (dc->base.is_jmp == DISAS_NORETURN) {
5286         return;
5287     }
5288     if (dc->pc != dc->base.pc_next) {
5289         dc->base.is_jmp = DISAS_TOO_MANY;
5290     }
5291 }
5292 
/*
 * Translator hook: finish the TB.  Choose the exit sequence according
 * to how translation ended, then emit the out-of-line code for any
 * delayed exceptions queued on dc->delay_excp_list.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is a symbolic marker (low bits set).
         * Decide whether a goto_ptr lookup of the next TB is still
         * possible, or whether we must exit to the main loop.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static pc: store it; npc decides lookup eligibility. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Materialize the pending conditional-branch npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
       break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the landing pads for delayed (conditional) exceptions. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is a real (4-aligned) address. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5372 
/* Translator hook for "-d in_asm": log the guest disassembly of the TB. */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
5379 
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5388 
/* Target entry point for TB translation: drive the generic loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5396 
/*
 * Allocate the TCG globals backing the guest CPU state: general and
 * floating point registers, condition-code fields, pc/npc, etc.
 * Called once at accelerator init time.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* One name per 64-bit register pair: "f0" covers f0/f1, etc. */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals and their offsets within CPUSPARCState. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
    };

    /* target_ulong-sized globals and their offsets. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero on SPARC, so it gets no backing global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o/%l/%i are addressed relative to regwptr. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5473 
5474 void sparc_restore_state_to_opc(CPUState *cs,
5475                                 const TranslationBlock *tb,
5476                                 const uint64_t *data)
5477 {
5478     SPARCCPU *cpu = SPARC_CPU(cs);
5479     CPUSPARCState *env = &cpu->env;
5480     target_ulong pc = data[0];
5481     target_ulong npc = data[1];
5482 
5483     env->pc = pc;
5484     if (npc == DYNAMIC_PC) {
5485         /* dynamic NPC: already stored */
5486     } else if (npc & JUMP_PC) {
5487         /* jump PC: use 'cond' and the jump targets of the translation */
5488         if (env->cond) {
5489             env->npc = npc & ~3;
5490         } else {
5491             env->npc = pc + 4;
5492         }
5493     } else {
5494         env->npc = npc;
5495     }
5496 }
5497