1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 #include "fpu/softfloat.h"
32 #include "asi.h"
33
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
36 #undef HELPER_H
37
/*
 * Build-time stubs: each helper below exists for only one of the two
 * build targets (sparc32 vs sparc64).  Any reference from code compiled
 * for the other target must be dynamically dead; qemu_build_not_reached()
 * turns an accidentally-live use into a build failure.
 */
#ifdef TARGET_SPARC64
/* Helpers implemented only for the 32-bit target. */
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_rdasr17(D, E) qemu_build_not_reached()
# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
/* Helpers implemented only for the 64-bit target. */
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_done(E) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
/*
 * The following are used as function pointers (e.g. in GVecGen tables),
 * so they expand to a statement expression yielding NULL rather than to
 * a function-like macro.
 */
# define gen_helper_cmask8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK 0
#endif
103
/*
 * Sentinel values stored in dc->pc / dc->npc when the PC is not a
 * compile-time constant.  Real PCs are instruction-aligned, so these
 * small values cannot collide with them.
 */
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC 1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC 2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;            /* current and next PC */
static TCGv cpu_regs[32];               /* windowed integer registers */
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
/*
 * Condition codes are tracked as individual N/V/Z/C pieces.
 * N and V are shared between icc and xcc; Z and C are kept separately
 * for icc and (sparc64 only) xcc.
 */
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

/* cpu_cc_Z/C name the full-width flags for the current target. */
#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

/*
 * env32/env64 variants additionally assert at build time that the field
 * is only accessed from the matching target's code paths.
 */
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif
155
/* A deferred comparison: condition 'cond' applied to c1 vs constant c2. */
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

/*
 * An exception to be raised from a delay slot; queued during translation
 * and emitted out of line at the end of the TB.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;         /* cpu_cond currently holds a valid value */
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;             /* FPRS dirty bits already set in this TB */
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This macro uses non-native bit order: FROM/TO count from the MSB,
// i.e. bit 0 is bit 31 of the instruction word.
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This macro uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

/* Sign-extending variants of the field extractors above. */
#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))
214
/*
 * Mark as dirty the half of the FP register file containing 'rd':
 * bit 1 for registers below 32, bit 2 otherwise.  No-op on sparc32,
 * which has no FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int mask = rd < 32 ? 1 : 2;

    /* If this TB already set the bit, there is nothing to emit. */
    if (dc->fprs_dirty & mask) {
        return;
    }
    dc->fprs_dirty |= mask;
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, mask);
#endif
}
227
228 /* floating point registers moves */
229
gen_offset_fpr_F(unsigned int reg)230 static int gen_offset_fpr_F(unsigned int reg)
231 {
232 int ret;
233
234 tcg_debug_assert(reg < 32);
235 ret= offsetof(CPUSPARCState, fpr[reg / 2]);
236 if (reg & 1) {
237 ret += offsetof(CPU_DoubleU, l.lower);
238 } else {
239 ret += offsetof(CPU_DoubleU, l.upper);
240 }
241 return ret;
242 }
243
/* Load single-precision register 'src' into a fresh i32 temp. */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

/* Store 'v' into single-precision register 'dst', marking FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

/* Byte offset of double-precision register 'reg' (must be even). */
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

/* Load double-precision register 'src' into a fresh i64 temp. */
static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

/* Store 'v' into double-precision register 'dst', marking FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

/*
 * Load quad-precision register 'src' as an i128: the lower-numbered
 * double holds the most-significant half.
 */
static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

/* Store i128 'v' into quad-precision register 'dst' (two doubles). */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
296
/* moves */
/*
 * Privilege predicates.  In user-only builds both are constant false;
 * on sparc64, hypervisor privilege implies supervisor privilege.
 */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/*
 * Whether addresses must be masked to 32 bits: never on sparc32,
 * always under a 32-bit ABI, otherwise per the translated state
 * (see address_mask_32bit).
 */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
320
/* Truncate 'addr' to 32 bits in place when 32-bit address masking applies. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (!AM_CHECK(dc)) {
        return;
    }
    tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
}
327
/* Compile-time counterpart of gen_address_mask for immediate addresses. */
static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    if (AM_CHECK(dc)) {
        return (uint32_t)addr;
    }
    return addr;
}
332
/*
 * Return a TCGv holding the value of GPR 'reg'.  %g0 reads as zero,
 * so it is materialized in a fresh temp rather than aliased.
 */
static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

/* Store 'v' into GPR 'reg'.  Writes to %g0 are discarded. */
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

/*
 * Return a TCGv suitable as the destination for GPR 'reg'.
 * For %g0 a scratch temp is returned, so the result is dropped.
 */
static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}
362
/* Both pc and npc must be direct-jump targets for goto_tb to be usable. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

/*
 * End the TB with a jump to (pc, npc): chained via goto_tb when
 * permitted, otherwise via a TB lookup on the new PC values.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
385
gen_carry32(void)386 static TCGv gen_carry32(void)
387 {
388 if (TARGET_LONG_BITS == 64) {
389 TCGv t = tcg_temp_new();
390 tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
391 return t;
392 }
393 return cpu_icc_C;
394 }
395
/*
 * dst = src1 + src2 (+ cin), computing all condition codes.
 * The double-word add2 against zero leaves the carry-out of the
 * full-width sum in cpu_cc_C.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* V = (result ^ src2) & ~(src1 ^ src2); Z temporarily holds the
       src1 ^ src2 term, which is also reused for icc.C below. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    /* Z and N both hold the result; Z is tested against zero later. */
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
420
/* ADDcc: add and set all condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* TADDcc: tagged add; low two bits of either operand also set icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* ADDC: add with 32-bit carry in, flags unchanged. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc: add with 32-bit carry in, setting all condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

/* Add with full-width (xcc) carry in, flags unchanged. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

/* Add with full-width (xcc) carry in, setting all condition codes. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
463
/*
 * dst = src1 - src2 (- cin), computing all condition codes.
 * sub2 against zero leaves the borrow in cpu_cc_C as -1/0, which is
 * negated below to the usual 0/1 carry convention.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* V = (result ^ src1) & (src1 ^ src2); Z temporarily holds the
       src1 ^ src2 term, which is also reused for icc.C below. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    /* Borrow into bit 32 is result ^ src1 ^ src2, as for addition. */
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    /* Z and N both hold the result; Z is tested against zero later. */
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
485
/* SUBcc: subtract and set all condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* TSUBcc: tagged subtract; low two bits of either operand also set icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* SUBC: subtract with 32-bit carry in, flags unchanged. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc: subtract with 32-bit carry in, setting all condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

/* Subtract with full-width (xcc) carry in, flags unchanged. */
static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

/* Subtract with full-width (xcc) carry in, setting all condition codes. */
static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}
528
/*
 * MULScc: one step of a 32x32 multiply.  Conditionally adds src2
 * (gated by Y bit 0), shifts Y and src1 right one bit, and sets the
 * condition codes via the final add.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    /* The final add sets all condition codes for the step. */
    gen_op_addcc(dst, t_src1, t_src2);
}
565
/*
 * 32x32 -> 64 multiply: the low 32 bits of the product go to dst
 * (full 64 bits on sparc64) and the high 32 bits to %y.
 * sign_ext selects signed vs unsigned extension of the operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

/* UMUL */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}
602
/* dst = upper half of the double-width unsigned product src1 * src2. */
static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv lo = tcg_temp_new();

    /* The low half of the product is computed and discarded. */
    tcg_gen_mulu2_tl(lo, dst, src1, src2);
}
608
/* dst = src3 + (src1 * src2), low 64 bits only. */
static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

/* dst = high 64 bits of (src1 * src2) + src3, 128-bit intermediate. */
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    /* 128-bit product, then a 128-bit add of src3; keep the high half. */
    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
628
/*
 * SDIV via helper (which can raise division exceptions).  The helper
 * produces a 64-bit result; only the low 32 bits (sign-extended on
 * sparc64) are kept in dst.
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
640
/*
 * UDIVcc: unsigned divide setting condition codes.  The helper returns
 * the 32-bit quotient in the low half of a 64-bit value; the high half
 * is split out into cpu_cc_V (presumably the overflow indication from
 * the helper -- confirm against helper_udiv).  C is cleared, N/Z follow
 * the quotient.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

/* SDIVcc: as gen_op_udivcc but signed, so N is sign-extended. */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
690
/* TADDccTV: tagged add, trapping on tag overflow (handled in the helper). */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: tagged subtract, trapping on tag overflow. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC: population count of src2.  src1 is deliberately ignored
   (NOTE(review): presumably reserved by the ISA -- confirm). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

/* Count leading zeros; clz of 0 yields the full register width. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
710
#ifndef TARGET_SPARC64
/* Link-time stub: the array8 helper exists only in the sparc64 build;
   this must never be reached at runtime on sparc32. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* ARRAY16: the 8-bit array address scaled for 16-bit elements. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the 8-bit array address scaled for 32-bit elements. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
729
/* VIS FPACK16 via helper; the scale factor comes from %gsr (sparc64 only). */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* VIS FPACKFIX via helper; the scale factor comes from %gsr. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* VIS FPACK32 via helper; the scale factor comes from %gsr. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
756
/* Saturating signed add of two 16-bit lanes packed in an i32. */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        /* Sign-extend each lane, add, then clamp to int16 range. */
        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    /* Repack the two saturated lanes. */
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

/* Saturating signed subtract of two 16-bit lanes packed in an i32. */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        /* Sign-extend each lane, subtract, then clamp to int16 range. */
        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
792
/* Saturating signed 32-bit add. */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    /* v < 0 iff the add overflowed: same-signed operands, result flipped. */
    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    /* t = INT32_MAX if the wrapped result is negative, else INT32_MIN.  */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    /* On overflow select the saturated bound, otherwise the raw sum. */
    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

/* Saturating signed 32-bit subtract. */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    /* v < 0 iff the subtract overflowed: operands of differing sign,
       result sign differing from src1. */
    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
828
/*
 * FALIGNDATA: concatenate s1:s2 and extract 8 bytes starting at the
 * byte offset held in the low 3 bits of 'gsr'.
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Bit shift = byte offset * 8. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

/* FALIGNDATA with the offset taken from the global %gsr. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}
861
/* VIS BSHUFFLE via helper; the byte permutation comes from %gsr. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* PDISTN: pixel distance with a zero accumulator. */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* FMUL8x16AL: uses the low 16-bit half of src2.
   Note: src2 is a temp and is clobbered in place. */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/* FMUL8x16AU: uses the high 16-bit half of src2.
   Note: src2 is a temp and is clobbered in place. */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}
891
/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit lane of
 * src1 by the corresponding signed 16-bit lane of src2, producing two
 * 32-bit products packed into the i64 result.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /* Lane 0: unsigned byte * signed 16-bit. */
    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    /* Lane 1: same, from bits 16.. of each source. */
    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

/* FMULD8SUX16: as above but using the signed high byte of each lane. */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
933
934 #ifdef TARGET_SPARC64
gen_vec_fchksm16(unsigned vece,TCGv_vec dst,TCGv_vec src1,TCGv_vec src2)935 static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
936 TCGv_vec src1, TCGv_vec src2)
937 {
938 TCGv_vec a = tcg_temp_new_vec_matching(dst);
939 TCGv_vec c = tcg_temp_new_vec_matching(dst);
940
941 tcg_gen_add_vec(vece, a, src1, src2);
942 tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
943 /* Vector cmp produces -1 for true, so subtract to add carry. */
944 tcg_gen_sub_vec(vece, dst, a, c);
945 }
946
gen_op_fchksm16(unsigned vece,uint32_t dofs,uint32_t aofs,uint32_t bofs,uint32_t oprsz,uint32_t maxsz)947 static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
948 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
949 {
950 static const TCGOpcode vecop_list[] = {
951 INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
952 };
953 static const GVecGen3 op = {
954 .fni8 = gen_helper_fchksm16,
955 .fniv = gen_vec_fchksm16,
956 .opt_opc = vecop_list,
957 .vece = MO_16,
958 };
959 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
960 }
961
/*
 * Vector expansion for FMEAN16: per-lane signed average, computed as
 *   (src1 >> 1) + (src2 >> 1) + ((src1 | src2) & 1)
 * which avoids intermediate overflow of src1 + src2.
 * NOTE(review): src1/src2 are written in place; this assumes the gvec
 * expander always passes scratch temporaries here — confirm.
 */
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    /* t = (src1 | src2) & 1: the rounding bit lost by the two shifts. */
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
974
/* Gvec expansion for FMEAN16: vector form with 64-bit helper fallback. */
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    /* Host vector opcodes required by gen_vec_fmean16. */
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,  /* integral (i64) fallback */
        .fniv = gen_vec_fmean16,     /* host-vector implementation */
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
/* These VIS expanders are never reached by pre-v9 decode. */
#define gen_op_fchksm16 ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16 ({ qemu_build_not_reached(); NULL; })
#endif
993
/*
 * Mark the point past which the current insn can no longer raise an
 * unwinding exception.  Once here, a still-live cpu_cond value is no
 * longer needed for exception unwinding, so tell the optimizer it is
 * dead and allow its computation to be elided.
 */
static void finishing_insn(DisasContext *dc)
{
    if (!dc->cpu_cond_live) {
        return;
    }
    tcg_gen_discard_tl(cpu_cond);
    dc->cpu_cond_live = false;
}
1006
/*
 * Resolve a pending conditional branch: select between the two possible
 * next-PC values recorded in dc->jump_pc[] using the saved comparison
 * dc->jump, and leave the result in cpu_npc.
 */
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    /* cpu_npc = cond(c1, c2) ? jump_pc[0] : jump_pc[1] */
    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
1015
1016 /* call this function before using the condition register as it may
1017 have been set for a jump */
flush_cond(DisasContext * dc)1018 static void flush_cond(DisasContext *dc)
1019 {
1020 if (dc->npc == JUMP_PC) {
1021 gen_generic_branch(dc);
1022 dc->npc = DYNAMIC_PC_LOOKUP;
1023 }
1024 }
1025
/* Make cpu_npc hold the correct next-PC value for the current state. */
static void save_npc(DisasContext *dc)
{
    if ((dc->npc & 3) == 0) {
        /* Statically known address: materialize it. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
        return;
    }
    switch (dc->npc) {
    case JUMP_PC:
        /* Pending conditional branch: resolve it into cpu_npc. */
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
        break;
    case DYNAMIC_PC:
    case DYNAMIC_PC_LOOKUP:
        /* cpu_npc already holds the value. */
        break;
    default:
        g_assert_not_reached();
    }
}
1044
/* Synchronize both cpu_pc and cpu_npc with the translator state. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1050
/*
 * Raise exception WHICH at the current insn: drop any live cpu_cond,
 * synchronize pc/npc, call the raise helper, and end the TB.
 */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1058
delay_exceptionv(DisasContext * dc,TCGv_i32 excp)1059 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1060 {
1061 DisasDelayException *e = g_new0(DisasDelayException, 1);
1062
1063 e->next = dc->delay_excp_list;
1064 dc->delay_excp_list = e;
1065
1066 e->lab = gen_new_label();
1067 e->excp = excp;
1068 e->pc = dc->pc;
1069 /* Caller must have used flush_cond before branch. */
1070 assert(e->npc != JUMP_PC);
1071 e->npc = dc->npc;
1072
1073 return e->lab;
1074 }
1075
/* Convenience wrapper: delayed exception with a constant trap number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1080
/*
 * Branch to a delayed TT_UNALIGNED exception if any of the address
 * bits selected by MASK are set in ADDR.
 */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGLabel *lab;
    TCGv low_bits = tcg_temp_new();

    /* Isolate the offending low address bits. */
    tcg_gen_andi_tl(low_bits, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, low_bits, 0, lab);
}
1092
/*
 * Advance pc = npc (delay-slot semantics), handling the symbolic npc
 * states as well as statically known addresses.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending branch, then copy it into cpu_pc. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime; pc inherits that state. */
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Both values statically known: no code needs to be emitted. */
        dc->pc = dc->npc;
    }
}
1116
/*
 * Assemble a DisasCompare that evaluates integer condition COND on the
 * icc (xcc == false) or xcc (xcc == true) flags.  The flags are kept
 * decomposed in cpu_cc_{N,Z,V,C}, with 32-bit views cpu_icc_{Z,C} used
 * for icc on 64-bit targets.  Bit 3 of COND inverts the sense.
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0          NE
         *   cc_Z && !((N ^ V) < 0)       EQ
         *   cc_Z & ~sign(N ^ V)          EQ  (sign replicated by sextract)
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0       NE
         *   cc_Z != 0 && cc_C == 0       EQ
         *   cc_Z & (cc_C ? 0 : -1)       EQ
         *   cc_Z & (cc_C - 1)            EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* For icc, the carry lives in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1215
/*
 * Assemble a DisasCompare that evaluates floating-point condition COND
 * against %fcc[CC].  Bit 3 of COND inverts the sense.
 */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both values have bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the 32-bit fcc value to a target-long comparison operand. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1275
gen_compare_reg(DisasCompare * cmp,int cond,TCGv r_src)1276 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1277 {
1278 static const TCGCond cond_reg[4] = {
1279 TCG_COND_NEVER, /* reserved */
1280 TCG_COND_EQ,
1281 TCG_COND_LE,
1282 TCG_COND_LT,
1283 };
1284 TCGCond tcond;
1285
1286 if ((cond & 3) == 0) {
1287 return false;
1288 }
1289 tcond = cond_reg[cond & 3];
1290 if (cond & 4) {
1291 tcond = tcg_invert_cond(tcond);
1292 }
1293
1294 cmp->cond = tcond;
1295 cmp->c1 = tcg_temp_new();
1296 cmp->c2 = 0;
1297 tcg_gen_mov_tl(cmp->c1, r_src);
1298 return true;
1299 }
1300
/* Clear FSR.cexc and FSR.ftt, as any successfully completing FPop does. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1306
/* FMOVs: 32-bit fp register move; clears cexc/ftt like any FPop. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}
1312
/* FNEGs: flip the sign bit of a 32-bit fp value. */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}
1318
/* FABSs: clear the sign bit of a 32-bit fp value. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}
1324
/* FMOVd: 64-bit fp register move; clears cexc/ftt like any FPop. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}
1330
/* FNEGd: flip the sign bit of a 64-bit fp value. */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}
1336
/* FABSd: clear the sign bit of a 64-bit fp value. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1342
/* FNEGq: flip the sign bit, which lives in the high half of the i128. */
static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}
1352
/* FABSq: clear the sign bit, which lives in the high half of the i128. */
static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}
1362
/* Fused d = s1 * s2 + s3 (single); 0 = no negate/halve flags. */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1367
/* Fused d = s1 * s2 + s3 (double); 0 = no negate/halve flags. */
static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}
1372
/* Fused d = s1 * s2 - s3 (single): fmadd with the addend negated. */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_c));
}
1378
/* Fused d = s1 * s2 - s3 (double): fmadd with the addend negated. */
static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_c));
}
1384
/* Fused d = -(s1 * s2 - s3) (single): negate both addend and result. */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_c |
                                       float_muladd_negate_result));
}
1390
/* Fused d = -(s1 * s2 - s3) (double): negate both addend and result. */
static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_c |
                                       float_muladd_negate_result));
}
1396
/* Fused d = -(s1 * s2 + s3) (single): negate the result only. */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_result));
}
1402
/* Fused d = -(s1 * s2 + s3) (double): negate the result only. */
static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3,
                      tcg_constant_i32(float_muladd_negate_result));
}
1408
/*
 * Use muladd to compute (1 * src1) + src2 / 2 with one rounding:
 * the halve_result flag halves after the sum, i.e. (s1 + s2) / 2.
 */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1416
/* Double-precision counterpart of gen_op_fhadds: (s1 + s2) / 2. */
static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1423
/*
 * Use muladd to compute (1 * src1) - src2 / 2 with one rounding,
 * i.e. (s1 - s2) / 2 via negate_c plus halve_result.
 */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1431
/* Double-precision counterpart of gen_op_fhsubs: (s1 - s2) / 2. */
static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1438
/*
 * Use muladd to compute -((1 * src1) + src2 / 2) with one rounding,
 * i.e. -((s1 + s2) / 2) via negate_result plus halve_result.
 */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1446
/* Double-precision counterpart of gen_op_fnhadds: -((s1 + s2) / 2). */
static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}
1453
/* Raise an immediate fp trap with FSR.ftt = FTT. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1465
/*
 * If the FPU is disabled, raise TT_NFPU_INSN and return 1 (the caller
 * must abandon the insn).  Returns 0 when the FPU is usable.  User-only
 * builds never trap here.
 */
static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
1476
/* asi moves */

/*
 * How an ASI-qualified access is implemented by the translator;
 * computed by resolve_asi() below.
 */
typedef enum {
    GET_ASI_HELPER,     /* fall back to the out-of-line ld/st_asi helpers */
    GET_ASI_EXCP,       /* exception already raised; emit nothing */
    GET_ASI_DIRECT,     /* plain qemu_ld/st with the chosen mem_idx */
    GET_ASI_DTWINX,     /* twin doubleword (v9 quad ldd) */
    GET_ASI_CODE,       /* instruction-space access (pre-v9 *TXT asis) */
    GET_ASI_BLOCK,      /* 64-byte fp block load/store */
    GET_ASI_SHORT,      /* 8/16-bit fp load/store */
    GET_ASI_BCOPY,      /* hyperSPARC 32-byte block copy (sta) */
    GET_ASI_BFILL,      /* hyperSPARC 32-byte block fill (stda) */
} ASIType;
1489
/* Resolved description of one ASI-qualified memory access. */
typedef struct {
    ASIType type;
    int asi;        /* raw ASI number */
    int mem_idx;    /* MMU index (translation regime) for the access */
    MemOp memop;    /* access size and endianness */
} DisasASI;
1496
1497 /*
1498 * Build DisasASI.
1499 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1501 */
/*
 * Decode ASI into a DisasASI: classify how the access will be emitted
 * (direct, twin, block, short, helper, ...) and select the MMU index.
 * May raise an exception (type GET_ASI_EXCP) for illegal/privileged
 * encodings.
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged. */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        /* Register form: take the ASI from the %asi register. */
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        /* First switch: select the MMU index. */
        switch (asi) {
        case ASI_REAL:      /* Bypass */
        case ASI_REAL_IO:   /* Bypass, non-cacheable */
        case ASI_REAL_L:    /* Bypass LE */
        case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        /* Second switch: select the implementation strategy. */
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
1736
1737 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/* Stub so references compile for user-only pre-v9; never reached. */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1743
/* Stub so references compile for user-only pre-v9; never reached. */
static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
1749 #endif
1750
/*
 * Emit an integer ASI load described by DA from ADDR into DST.
 * GET_ASI_EXCP means an exception was already raised: emit nothing.
 */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Instruction-space read via the dedicated code helper. */
            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception: state must be current. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1796
/*
 * Emit an integer ASI store described by DA of SRC to ADDR.
 * GET_ASI_EXCP means an exception was already raised: emit nothing.
 */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to the 32-byte cache line. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop)

;
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* The helper may raise an exception: state must be current. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1875
/*
 * Emit the SWAP operation via ASI descriptor DA: atomically exchange
 * SRC with the word at ADDR, returning the old memory value in DST.
 */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    if (da->type == GET_ASI_EXCP) {
        /* Exception already raised at decode. */
        return;
    }
    if (da->type == GET_ASI_DIRECT) {
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
    } else {
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
    }
}
1892
/*
 * Emit compare-and-swap via ASI descriptor DA:
 * oldv = *addr; if (oldv == cmpv) *addr = newv;
 */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
1909
/*
 * Emit LDSTUB via ASI descriptor DA: atomically load the byte at ADDR
 * into DST and store 0xff there.
 */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot emulate the helper pair atomically: retry serially. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1944
/*
 * Emit a floating-point ASI load (ldfa/lddfa/ldqfa) described by DA
 * into fp register RD.  ORIG_SIZE is the architectural access size
 * before the 128-bit case is split into two 64-bit accesses.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Quad load: two 64-bit halves into rd and rd + 2. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            /* 8 consecutive doublewords into rd, rd+2, ..., rd+14. */
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them.  */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2065
/*
 * Generate code for an FP store-alternate insn (stfa/stdfa/stqfa):
 * store the FP register(s) rooted at RD to ADDR, using the previously
 * decoded ASI descriptor DA.  ORIG_SIZE is the access size from the
 * instruction itself, before any 128-bit access is split below.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception has already been generated; emit no store. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            /* Store 8 consecutive double registers (64 bytes). */
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2151
/*
 * Generate code for LDDA: load 64 bits from ADDR via the decoded ASI
 * descriptor DA into the integer register pair rd (most significant
 * word) and rd+1 (least significant word).
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already generated; skip the writeback below too. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* Instruction-space access goes through a dedicated helper. */
            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2247
/*
 * Generate code for STDA: store the integer register pair rd (most
 * significant word) / rd+1 (least significant word) as one 64-bit
 * value to ADDR via the decoded ASI descriptor DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* The exception has already been generated; emit no store. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, round the address
               down to a 32-byte boundary, and write two 16-byte halves. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2338
/* FMOVScc: conditionally move single-precision FP register rs into rd. */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (condition held) ? rs : old rd. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2364
/* FMOVDcc: conditionally move double-precision FP register rs into rd. */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 src = gen_load_fpr_D(dc, rs);
    TCGv_i64 old = gen_load_fpr_D(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 res = tcg_temp_new_i64();

    /* res = (c1 <cond> c2) ? src : old. */
    tcg_gen_movcond_i64(cmp->cond, res, cmp->c1, c2, src, old);
    gen_store_fpr_D(dc, rd, res);
#else
    qemu_build_not_reached();
#endif
}
2377
/* FMOVQcc: conditionally move quad FP register rs into rd, handled as
   two independent 64-bit halves (rd and rd+2). */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
2397
#ifdef TARGET_SPARC64
/* Compute a pointer to the trap state for the current trap level:
   r_tsptr = &env->ts[env->tl & MAXTL_MASK]. */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2421
/* Decode a double-FP register field: the even 5-bit index, with bit 0
   of the encoding acting as bit 5 on sparc64 (registers d32..d62). */
static int extract_dfpreg(DisasContext *dc, int x)
{
#ifdef TARGET_SPARC64
    return (x & 0x1e) | ((x & 1) << 5);
#else
    return x & 0x1e;
#endif
}
2430
/* Decode a quad-FP register field: index aligned to a multiple of 4,
   with bit 0 of the encoding acting as bit 5 on sparc64. */
static int extract_qfpreg(DisasContext *dc, int x)
{
#ifdef TARGET_SPARC64
    return (x & 0x1c) | ((x & 1) << 5);
#else
    return x & 0x1c;
#endif
}
2439
2440 /* Include the auto-generated decoder. */
2441 #include "decode-insns.c.inc"
2442
2443 #define TRANS(NAME, AVAIL, FUNC, ...) \
2444 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2445 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2446
2447 #define avail_ALL(C) true
2448 #ifdef TARGET_SPARC64
2449 # define avail_32(C) false
2450 # define avail_ASR17(C) false
2451 # define avail_CASA(C) true
2452 # define avail_DIV(C) true
2453 # define avail_MUL(C) true
2454 # define avail_POWERDOWN(C) false
2455 # define avail_64(C) true
2456 # define avail_FMAF(C) ((C)->def->features & CPU_FEATURE_FMAF)
2457 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2458 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2459 # define avail_IMA(C) ((C)->def->features & CPU_FEATURE_IMA)
2460 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2461 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2462 # define avail_VIS3(C) ((C)->def->features & CPU_FEATURE_VIS3)
2463 # define avail_VIS3B(C) avail_VIS3(C)
2464 # define avail_VIS4(C) ((C)->def->features & CPU_FEATURE_VIS4)
2465 #else
2466 # define avail_32(C) true
2467 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2468 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2469 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2470 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2471 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2472 # define avail_64(C) false
2473 # define avail_FMAF(C) false
2474 # define avail_GL(C) false
2475 # define avail_HYPV(C) false
2476 # define avail_IMA(C) false
2477 # define avail_VIS1(C) false
2478 # define avail_VIS2(C) false
2479 # define avail_VIS3(C) false
2480 # define avail_VIS3B(C) false
2481 # define avail_VIS4(C) false
2482 #endif
2483
/* Default case for non jump instructions.  Advance pc/npc by one insn,
   handling the out-of-band npc markers (DYNAMIC_PC*, JUMP_PC). */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc holds a marker value, not a real address. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime; advance in cpu state. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc are static: just step the decoder state. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2524
2525 /*
2526 * Major opcodes 00 and 01 -- branches, call, and sethi
2527 */
2528
/*
 * Handle a conditional branch (with annul bit ANNUL) to pc + disp*4.
 * Always/never conditions are resolved at translate time; otherwise
 * either both exits are emitted now (annul case), or the comparison is
 * recorded in dc->jump / cpu_cond so the delay slot executes first.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul: the delay slot is skipped. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        npc = dc->npc;
        if (npc & 3) {
            /* Dynamic npc: advance in cpu state; annul skips 4 more. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional: the delay slot executes only on the
           taken path, so both exits can be emitted right now. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Select the new npc at runtime with a movcond. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the decision: record both targets and materialize
               the comparison so the delay-slot insn can run first. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2608
/* Raise a privileged-instruction trap; the insn is thereby handled. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2614
/* Raise an fp exception with FTT set to "unimplemented FPop". */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2620
gen_trap_float128(DisasContext * dc)2621 static bool gen_trap_float128(DisasContext *dc)
2622 {
2623 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2624 return false;
2625 }
2626 return raise_unimpfpop(dc);
2627 }
2628
/* Bicc/BPcc: branch on integer condition codes. */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)
2639
/* FBfcc/FBPfcc: branch on floating-point condition codes. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* An FPU-disabled trap takes priority over the branch itself. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)
2653
/* BPr: branch on the contents of an integer register (v9 only). */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* Decode fails (illegal insn) if the rcond encoding is rejected. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2666
/* CALL: write the current pc to r15 (%o7) and branch pc-relative. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    /* The branch takes effect after the delay slot. */
    dc->npc = target;
    return true;
}
2676
/* Coprocessor insns, which no emulated SPARC implements. */
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2690
/* SETHI: rd = imm22 << 10, zero-extended. */
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop. */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
2699
2700 /*
2701 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2702 */
2703
/*
 * Ticc: conditionally trap.  The software trap number is rs1 plus
 * either the immediate or rs2, masked to the implementation's range
 * and offset by TT_TRAP.  COND is the 4-bit condition; CC selects
 * icc/xcc.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* HYPV cpus in supervisor mode use the wider UA2005 trap mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    /* Branch to a deferred exception label when the condition holds. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2756
/* Tcc with register operand. */
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    /* The cc field is v9-only. */
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

/* Tcc with v7/v8 immediate encoding (no cc field). */
static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

/* Tcc with v9 immediate encoding (with cc field). */
static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}
2780
/* STBAR: store/store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2786
/* MEMBAR (v9): mmask selects the ordering barrier; any cmask bit
   additionally ends the TB. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2802
/*
 * Common plumbing for the RDASR/RDPR/RDHPR helpers below: raise a
 * privilege trap unless PRIV, otherwise store FUNC's result into rd
 * and advance.  FUNC may fill in the destination temp passed to it
 * and return it, or return some other readable TCGv (e.g. a global).
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2812
/* RDY: the %y register is a TCG global; the passed temp is unused. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2830
/* Leon3 %asr17 configuration register, computed by helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* RDCCR: assemble the v9 condition-code register via helper. */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* RDASI: the current ASI is known at translate time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2857
/* RDTICK / RDPR %tick: read the tick counter via helper.  This is an
   I/O access, so the TB may need to end afterwards. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* RDPC: the pc of this insn is a translate-time constant. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2880
/* RDFPRS: sign-extend the 32-bit FPRS global. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* RDGSR: traps if the FPU is disabled, else reads the GSR global. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* RDSOFTINT: load env->softint (32-bit, sign-extended). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2904
/* RDTICK_CMPR: load env->tick_cmpr. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* RDSTICK: read the system tick counter via helper (I/O access). */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* RDSTICK_CMPR: load env->stick_cmpr. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2938
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always report the strand as running. */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* RDPSR (v8): assemble the processor state register via helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
2958
/* RDHPR %hpstate: load env->hpstate. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* RDHPR %htstate: load env->htstate[] indexed by the masked TL. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    /* Scale the index by 8 bytes per array entry. */
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
2983
/* RDHPR %hintp: load env->hintp. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* RDHPR %htba: load env->htba. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* RDHPR %hver: load env->hver. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* RDHPR %hstick_cmpr: load env->hstick_cmpr. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3016
/* RDWIM (v8): load the window invalid mask. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

/* RDPR %tpc: from the trap state for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3039
/* RDPR %tnpc: from the trap state for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

/* RDPR %tstate: from the trap state for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3069
/* RDPR %tt: trap type (32-bit field) from the current trap state. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

/* RDTBR (v8) / RDPR %tba (v9): the trap base register global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3093
/*
 * Readers for simple privileged registers stored as 32-bit env fields.
 * ld32s sign-extends to the target long; env64_field_offsetof selects
 * fields that exist only in the 64-bit env layout.
 */

/* Read %pstate. */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* Read %tl (current trap level). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* Read %pil; psrpil is shared with the 32-bit env, hence env_field_offsetof. */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

/* Read %cwp via helper (the helper computes the architectural value). */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* Read %cansave. */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* Read %canrestore. */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* Read %cleanwin. */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* Read %otherwin. */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* Read %wstate. */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* Read %gl (global register level, UA2005+). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* Read %ver (implementation version register). */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3191
trans_FLUSHW(DisasContext * dc,arg_FLUSHW * a)3192 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3193 {
3194 if (avail_64(dc)) {
3195 gen_helper_flushw(tcg_env);
3196 return advance_pc(dc);
3197 }
3198 return false;
3199 }
3200
/*
 * Common driver for WRASR/WRPR/WRHPR-style writes.
 * Computes src = r[rs1] XOR (r[rs2] or simm) -- the architectural wr
 * semantics -- raises a privilege trap if !priv, then hands the value
 * to @func, which emits the register-specific store.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* rs1 is %g0: the xor degenerates to the immediate itself. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero: pass rs1 through unchanged. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3232
/* WRY: %y holds only 32 bits; zero-extend into the global. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* WRCCR: helper unpacks the value into the N/Z/C/V globals. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* WRASI: only the low 8 bits are architecturally meaningful. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/*
 * WRFPRS: write the FP register state; invalidate the translator's
 * cached dirty bits and end the TB so the new FPRS takes effect.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

/* WRGSR: traps if the FPU is disabled, else writes the GSR global. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* Set bits in the soft interrupt register. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* Clear bits in the soft interrupt register. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* Overwrite the soft interrupt register. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3300
/*
 * WR %tick_cmpr: store the new compare value, then reprogram the tick
 * timer.  translator_io_start must precede the timer helper so the
 * insn is treated as I/O; the TB ends so a pending timer interrupt
 * can be delivered.
 */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* WR %stick: set the system tick counter itself. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* WR %stick_cmpr: like do_wrtick_cmpr but for the system tick timer. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3345
/*
 * WRPOWERDOWN: halt the cpu.  State must be synced to env first,
 * since the helper does not return to the middle of a TB.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)

/* WRPSR (v8): helper validates/splits the value; end TB for new state. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* WRWIM (v8): mask to the implemented window bits before storing. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3384
/* WRPR %tpc: store into the trap_state of the current trap level. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

/* WRPR %tnpc. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

/* WRPR %tstate. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

/* WRPR %tt: a 32-bit field, stored with st32. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

/* WRPR %tick: set the tick counter; see do_wrtick_cmpr for ordering. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* WRPR %tba: trap base address lives in the cpu_tbr global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3460
/*
 * WRPR %pstate: privilege/endianness/etc may change, so sync state,
 * let the helper validate, and force the next pc to be looked up
 * dynamically (DYNAMIC_PC).
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* WRPR %tl: changing the trap level redirects trap-state accesses. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

/* WRPR %pil: may unmask interrupts, so treat as I/O and maybe end TB. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* WRPR %cwp: helper rotates the register window. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* WRPR %cansave. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* WRPR %canrestore. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* WRPR %cleanwin. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* WRPR %otherwin. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* WRPR %wstate. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* WRPR %gl: helper swaps the global register set (UA2005+). */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3540
3541 /* UA2005 strand status */
3542 static void do_wrssr(DisasContext *dc, TCGv src)
3543 {
3544 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3545 }
3546
TRANS(WRPR_strand_status,HYPV,do_wr_special,a,hypervisor (dc),do_wrssr)3547 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3548
3549 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3550
3551 static void do_wrhpstate(DisasContext *dc, TCGv src)
3552 {
3553 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3554 dc->base.is_jmp = DISAS_EXIT;
3555 }
3556
TRANS(WRHPR_hpstate,HYPV,do_wr_special,a,hypervisor (dc),do_wrhpstate)3557 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3558
3559 static void do_wrhtstate(DisasContext *dc, TCGv src)
3560 {
3561 TCGv_i32 tl = tcg_temp_new_i32();
3562 TCGv_ptr tp = tcg_temp_new_ptr();
3563
3564 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3565 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3566 tcg_gen_shli_i32(tl, tl, 3);
3567 tcg_gen_ext_i32_ptr(tp, tl);
3568 tcg_gen_add_ptr(tp, tp, tcg_env);
3569
3570 tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
3571 }
3572
TRANS(WRHPR_htstate,HYPV,do_wr_special,a,hypervisor (dc),do_wrhtstate)3573 TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3574
3575 static void do_wrhintp(DisasContext *dc, TCGv src)
3576 {
3577 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
3578 }
3579
TRANS(WRHPR_hintp,HYPV,do_wr_special,a,hypervisor (dc),do_wrhintp)3580 TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3581
3582 static void do_wrhtba(DisasContext *dc, TCGv src)
3583 {
3584 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
3585 }
3586
TRANS(WRHPR_htba,HYPV,do_wr_special,a,hypervisor (dc),do_wrhtba)3587 TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3588
3589 static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
3590 {
3591 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3592
3593 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
3594 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
3595 translator_io_start(&dc->base);
3596 gen_helper_tick_set_limit(r_tickptr, src);
3597 /* End TB to handle timer interrupt */
3598 dc->base.is_jmp = DISAS_EXIT;
3599 }
3600
TRANS(WRHPR_hstick_cmpr,HYPV,do_wr_special,a,hypervisor (dc),do_wrhstick_cmpr)3601 TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
3602 do_wrhstick_cmpr)
3603
3604 static bool do_saved_restored(DisasContext *dc, bool saved)
3605 {
3606 if (!supervisor(dc)) {
3607 return raise_priv(dc);
3608 }
3609 if (saved) {
3610 gen_helper_saved(tcg_env);
3611 } else {
3612 gen_helper_restored(tcg_env);
3613 }
3614 return advance_pc(dc);
3615 }
3616
3617 TRANS(SAVED, 64, do_saved_restored, true)
3618 TRANS(RESTORED, 64, do_saved_restored, false)
3619
/* NOP: nothing to emit; just step the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3631
/*
 * Common driver for register/register-or-immediate arithmetic.
 * @func is the reg-reg op, @funci an optional immediate variant.
 * With @logic_cc, the result is computed directly into cpu_cc_N and
 * the remaining flags are set as for a logical op (Z mirrors the
 * result, C = V = 0); the value is still written to rd afterwards.
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute straight into the N flag; it doubles as the result. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Keep the 32-bit icc copies in sync as well. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3674
/*
 * Arithmetic ops: the cc form dispatches to a flag-computing variant
 * (@func_cc); flags are never derived from the plain result.
 */
static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

/*
 * Logical ops: the same op is used with and without cc; the flags for
 * the cc form follow from the result (see logic_cc in do_arith_int).
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3719
/*
 * OR: handled specially so that "or %g0, x, rd" (the canonical MOV)
 * becomes a plain register/constant store with no emitted arithmetic.
 */
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3736
/*
 * UDIV: 64-by-32 unsigned divide of (%y:rs1) by rs2, with the result
 * saturated to UINT32_MAX on overflow.  Division by zero raises
 * TT_DIV_ZERO -- immediately for a zero immediate, via a delayed
 * exception branch for a runtime-zero register.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        /* State must be synced before the conditional trap branch. */
        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Dividend is %y in the high half, rs1 in the low half. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate: quotients wider than 32 bits clamp to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3789
/*
 * UDIVX: full-width unsigned divide (v9).  Division by zero raises
 * TT_DIV_ZERO, either immediately (zero immediate) or via a delayed
 * exception branch (runtime-zero register).
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        /* State must be synced before the conditional trap branch. */
        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3827
/*
 * SDIVX: full-width signed divide (v9).  Besides the zero-divisor
 * trap, the INT64_MIN / -1 case must be special-cased: it would trap
 * on an x86 host even though the architectural result (INT64_MIN) is
 * well defined.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x, and this avoids the host overflow trap. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3884
/*
 * VIS EDGE* instructions: compute partial-store byte masks for the
 * left (s1) and right (s2) edges of a misaligned block, at element
 * @width bits.  The cc forms additionally set flags as for
 * "subcc s1, s2".
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /* Extract the element index within the 8-byte word; m is the
       all-elements mask for this width. */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /*
     * Compute dst = (s1 == s2 under amask ? l & r : l).
     * TSTEQ is true when (s1 ^ s2) & amask == 0, i.e. both addresses
     * fall in the same aligned 8-byte word; then both edges apply.
     */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
3963
/* Generic unary reg -> reg instruction driver. */
static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)

/* Generic binary reg, reg -> reg instruction driver. */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4000
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * recorded in GSR.alignaddr_offset for a later FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/* ALIGNADDRESS_LITTLE: as above, but GSR gets the negated offset. */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/* BMASK: dst = s1 + s2, with the sum also deposited into GSR[63:32]. */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4042
4043 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4044 {
4045 func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4046 return true;
4047 }
4048
4049 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4050 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4051 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4052
/*
 * Register-count shifts.  @l selects left shift; otherwise @u selects
 * logical (vs arithmetic) right shift.  a->x is the v9 64-bit form;
 * the count is masked to 63 (x) or 31 first.  For 32-bit operations
 * on a 64-bit cpu, the operand is extended so the result is correct
 * in all 64 bits.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result zero-extends. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so high garbage cannot shift in. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first for a correct 32-bit arithmetic shift. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4092
/*
 * Immediate-counted shifts: SLL/SRL/SRA with a constant count a->i.
 * Flags l/u as for do_shift_r.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift: plain TCG shift of the target-long value. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on sparc64: combine the shift and the 32-bit
         * extension into one deposit/extract op.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4129
4130 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4131 {
4132 /* For simplicity, we under-decoded the rs2 form. */
4133 if (!imm && rs2_or_imm & ~0x1f) {
4134 return NULL;
4135 }
4136 if (imm || rs2_or_imm == 0) {
4137 return tcg_constant_tl(rs2_or_imm);
4138 } else {
4139 return cpu_regs[rs2_or_imm];
4140 }
4141 }
4142
/*
 * Shared tail for MOVcc/MOVfcc/MOVR: conditionally move src2 into rd.
 * rd is pre-loaded so that a false condition keeps its current value.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4152
trans_MOVcc(DisasContext * dc,arg_MOVcc * a)4153 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4154 {
4155 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4156 DisasCompare cmp;
4157
4158 if (src2 == NULL) {
4159 return false;
4160 }
4161 gen_compare(&cmp, a->cc, a->cond, dc);
4162 return do_mov_cond(dc, &cmp, a->rd, src2);
4163 }
4164
trans_MOVfcc(DisasContext * dc,arg_MOVfcc * a)4165 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4166 {
4167 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4168 DisasCompare cmp;
4169
4170 if (src2 == NULL) {
4171 return false;
4172 }
4173 gen_fcompare(&cmp, a->cc, a->cond);
4174 return do_mov_cond(dc, &cmp, a->rd, src2);
4175 }
4176
trans_MOVR(DisasContext * dc,arg_MOVR * a)4177 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4178 {
4179 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4180 DisasCompare cmp;
4181
4182 if (src2 == NULL) {
4183 return false;
4184 }
4185 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4186 return false;
4187 }
4188 return do_mov_cond(dc, &cmp, a->rd, src2);
4189 }
4190
/*
 * Common address/sum computation for JMPL, RETT, RETURN, SAVE, RESTORE:
 * compute rs1 + (reg-or-imm) and hand the result to func.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4215
/* JMPL: jump to src, writing the current PC into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    /* Target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4236
/* RETT (sparc32): return from trap to src; privileged. */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    /* Target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    /* The helper performs the trap-return state change. */
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4254
/* RETURN (sparc64): restore the register window and jump to src. */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    /* Target must be 4-byte aligned. */
    gen_check_align(dc, src, 3);
    /* src was computed into a temp, so it survives the window restore. */
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4269
/*
 * SAVE: advance the register window; src (rs1 + reg-or-imm, computed in
 * the old window by do_add_special) is written to rd of the new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4278
/*
 * RESTORE: retreat the register window; src (computed in the old window)
 * is written to rd of the new window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4287
/*
 * DONE/RETRY (sparc64): return from a trap handler; privileged.
 * The helpers set pc/npc from the trap state, hence DYNAMIC_PC.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* NOTE(review): helpers may touch timing-sensitive state (icount). */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4306
4307 /*
4308 * Major opcode 11 -- load and store instructions
4309 */
4310
/*
 * Compute the effective address rs1 + (imm ? simm : rs2) for a load/store.
 * Returns NULL when the under-decoded rs2 field is invalid.  When address
 * masking applies (AM_CHECK), the result is zero-extended to 32 bits.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Reuse the temp if we already have one; never modify a GPR. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4339
/* Integer loads (optionally with explicit ASI) into a general register. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4363
/* Integer stores (optionally with explicit ASI) from a general register. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4383
/* LDD: load doubleword into an even/odd register pair; rd must be even. */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4400
/* STD: store doubleword from an even/odd register pair; rd must be even. */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4417
/* LDSTUB: atomic load-store unsigned byte (load old value, store 0xff). */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4434
/* SWAP: atomically exchange a 32-bit word in memory with register rd. */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* rd is both the value stored (src) and the destination (dst). */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4452
/*
 * CASA/CASXA: compare-and-swap.  The address is rs1 with no offset;
 * rs2 holds the comparand, rd supplies the new value and receives the
 * old memory contents.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4474
/* FP loads (optionally with explicit ASI) of size sz into FP register rd. */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad-precision may be unimplemented; trap if so. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4502
/* FP stores (optionally with explicit ASI) of size sz from FP register rd. */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    /* Quad-precision may be unimplemented; trap if so. */
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
/* On sparc64 this opcode slot is STQF; on sparc32 it is STDFQ (below). */
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4529
/*
 * STDFQ (sparc32 only): store double from FP queue.  This implementation
 * has no FP queue, so raise a sequence error via FSR.FTT.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}
4544
/* LDFSR: load the 32-bit FSR from memory. */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    /* fcc0 is kept unpacked; extract it from the loaded word. */
    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    /* The helper absorbs the remaining fields, excluding fcc and ftt. */
    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4566
/*
 * LDXFSR/LDXEFSR (sparc64): load the 64-bit FSR.  'entire' selects
 * whether the ftt field is also updated (LDXEFSR).
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /*
     * 'hi' aliases cpu_fcc[3]; since cpu_fcc[3] is written last below,
     * all extracts from 'hi' read the loaded value, not a clobber.
     */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4605
/* STFSR/STXFSR: store the (32- or 64-bit) FSR to memory. */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    /* The helper reassembles the architectural FSR value. */
    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4626
4627 static bool do_fc(DisasContext *dc, int rd, int32_t c)
4628 {
4629 if (gen_trap_ifnofpu(dc)) {
4630 return true;
4631 }
4632 gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
4633 return advance_pc(dc);
4634 }
4635
4636 TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
4637 TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4638
/* Write the 64-bit constant c into double register rd (FZEROd/FONEd). */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4650
do_ff(DisasContext * dc,arg_r_r * a,void (* func)(TCGv_i32,TCGv_i32))4651 static bool do_ff(DisasContext *dc, arg_r_r *a,
4652 void (*func)(TCGv_i32, TCGv_i32))
4653 {
4654 TCGv_i32 tmp;
4655
4656 if (gen_trap_ifnofpu(dc)) {
4657 return true;
4658 }
4659
4660 tmp = gen_load_fpr_F(dc, a->rs);
4661 func(tmp, tmp);
4662 gen_store_fpr_F(dc, a->rd, tmp);
4663 return advance_pc(dc);
4664 }
4665
TRANS(FMOVs,ALL,do_ff,a,gen_op_fmovs)4666 TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
4667 TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
4668 TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
4669 TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
4670 TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4671
/* Narrowing unary op: 32-bit frd = func(64-bit frs). */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4691
/* Single-precision unary op via an env helper: frd = func(env, frs). */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4710
/* Narrowing unary op via an env helper: 32-bit frd = func(env, 64-bit frs). */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4731
/* Double-precision unary op without env access: drd = func(drs). */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4753
/* Double-precision unary op via an env helper: drd = func(env, drs). */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4773
/* Widening unary op: 64-bit drd = func(32-bit frs). */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4792
/* Widening unary op via an env helper: 64-bit drd = func(env, 32-bit frs). */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4813
/* Quad-precision unary op without env access: qrd = func(qrs). */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    /* Clear pending IEEE exception / FTT state before the operation. */
    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4836
/* Quad-precision unary op via an env helper: qrd = func(env, qrs). */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4856
/* Quad-to-single conversion via an env helper: frd = func(env, qrs). */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4879
/* Quad-to-double conversion via an env helper: drd = func(env, qrs). */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4902
/* Single-to-quad conversion via an env helper: qrd = func(env, frs). */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
4925
/* Double-to-quad conversion via an env helper: qrd = func(env, drs). */
static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
4948
/* Single-precision binary op without env access: frd = func(frs1, frs2). */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
4986
/* Single-precision binary op via an env helper: frd = func(env, frs1, frs2). */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5009
/* Widening binary op: 64-bit drd = func(32-bit frs1, 32-bit frs2). */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5033
/* Mixed-width binary op: 64-bit drd = func(32-bit frs1, 64-bit drs2). */
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5053
/*
 * Generic-vector binary op on 64-bit FP registers:
 * drd = func(drs1, drs2), elementwise at the given vece element size.
 * oprsz = maxsz = 8 bytes, i.e. one double register.
 */
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5110
/* Double-precision binary op without env access: drd = func(drs1, drs2). */
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5152
5153 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5154 void (*func)(TCGv, TCGv_i64, TCGv_i64))
5155 {
5156 TCGv_i64 src1, src2;
5157 TCGv dst;
5158
5159 if (gen_trap_ifnofpu(dc)) {
5160 return true;
5161 }
5162
5163 dst = gen_dest_gpr(dc, a->rd);
5164 src1 = gen_load_fpr_D(dc, a->rs1);
5165 src2 = gen_load_fpr_D(dc, a->rs2);
5166 func(dst, src1, src2);
5167 gen_store_gpr(dc, a->rd, dst);
5168 return advance_pc(dc);
5169 }
5170
/* VIS partitioned 16-bit compares; result mask lands in a GPR. */
TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

/* VIS partitioned 32-bit compares. */
TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

/* VIS3B/VIS4 partitioned 8-bit compares. */
TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

/* VIS3 pixel distance (no accumulate) and 64x64 carry-less multiplies. */
TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5195
5196 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5197 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5198 {
5199 TCGv_i64 dst, src1, src2;
5200
5201 if (gen_trap_ifnofpu(dc)) {
5202 return true;
5203 }
5204
5205 dst = tcg_temp_new_i64();
5206 src1 = gen_load_fpr_D(dc, a->rs1);
5207 src2 = gen_load_fpr_D(dc, a->rs2);
5208 func(dst, tcg_env, src1, src2);
5209 gen_store_fpr_D(dc, a->rd, dst);
5210 return advance_pc(dc);
5211 }
5212
TRANS(FADDd,ALL,do_env_ddd,a,gen_helper_faddd)5213 TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
5214 TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
5215 TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
5216 TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
5217 TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
5218 TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5219
5220 static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
5221 {
5222 TCGv_i64 dst;
5223 TCGv_i32 src1, src2;
5224
5225 if (gen_trap_ifnofpu(dc)) {
5226 return true;
5227 }
5228 if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
5229 return raise_unimpfpop(dc);
5230 }
5231
5232 dst = tcg_temp_new_i64();
5233 src1 = gen_load_fpr_F(dc, a->rs1);
5234 src2 = gen_load_fpr_F(dc, a->rs2);
5235 gen_helper_fsmuld(dst, tcg_env, src1, src2);
5236 gen_store_fpr_D(dc, a->rd, dst);
5237 return advance_pc(dc);
5238 }
5239
trans_FNsMULd(DisasContext * dc,arg_r_r_r * a)5240 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5241 {
5242 TCGv_i64 dst;
5243 TCGv_i32 src1, src2;
5244
5245 if (!avail_VIS3(dc)) {
5246 return false;
5247 }
5248 if (gen_trap_ifnofpu(dc)) {
5249 return true;
5250 }
5251 dst = tcg_temp_new_i64();
5252 src1 = gen_load_fpr_F(dc, a->rs1);
5253 src2 = gen_load_fpr_F(dc, a->rs2);
5254 gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5255 gen_store_fpr_D(dc, a->rd, dst);
5256 return advance_pc(dc);
5257 }
5258
do_ffff(DisasContext * dc,arg_r_r_r_r * a,void (* func)(TCGv_i32,TCGv_i32,TCGv_i32,TCGv_i32))5259 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5260 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5261 {
5262 TCGv_i32 dst, src1, src2, src3;
5263
5264 if (gen_trap_ifnofpu(dc)) {
5265 return true;
5266 }
5267
5268 src1 = gen_load_fpr_F(dc, a->rs1);
5269 src2 = gen_load_fpr_F(dc, a->rs2);
5270 src3 = gen_load_fpr_F(dc, a->rs3);
5271 dst = tcg_temp_new_i32();
5272 func(dst, src1, src2, src3);
5273 gen_store_fpr_F(dc, a->rd, dst);
5274 return advance_pc(dc);
5275 }
5276
/* FMAF single-precision fused multiply-add family. */
TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5281
5282 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5283 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5284 {
5285 TCGv_i64 dst, src1, src2, src3;
5286
5287 if (gen_trap_ifnofpu(dc)) {
5288 return true;
5289 }
5290
5291 dst = tcg_temp_new_i64();
5292 src1 = gen_load_fpr_D(dc, a->rs1);
5293 src2 = gen_load_fpr_D(dc, a->rs2);
5294 src3 = gen_load_fpr_D(dc, a->rs3);
5295 func(dst, src1, src2, src3);
5296 gen_store_fpr_D(dc, a->rd, dst);
5297 return advance_pc(dc);
5298 }
5299
/* Pixel distance with accumulate, double FMA family, integer multiply-add. */
TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5307
5308 static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
5309 {
5310 TCGv_i64 dst, src1, src2;
5311 TCGv src3;
5312
5313 if (!avail_VIS4(dc)) {
5314 return false;
5315 }
5316 if (gen_trap_ifnofpu(dc)) {
5317 return true;
5318 }
5319
5320 dst = tcg_temp_new_i64();
5321 src1 = gen_load_fpr_D(dc, a->rd);
5322 src2 = gen_load_fpr_D(dc, a->rs2);
5323 src3 = gen_load_gpr(dc, a->rs1);
5324 gen_op_faligndata_i(dst, src1, src2, src3);
5325 gen_store_fpr_D(dc, a->rd, dst);
5326 return advance_pc(dc);
5327 }
5328
do_env_qqq(DisasContext * dc,arg_r_r_r * a,void (* func)(TCGv_i128,TCGv_env,TCGv_i128,TCGv_i128))5329 static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
5330 void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
5331 {
5332 TCGv_i128 src1, src2;
5333
5334 if (gen_trap_ifnofpu(dc)) {
5335 return true;
5336 }
5337 if (gen_trap_float128(dc)) {
5338 return true;
5339 }
5340
5341 src1 = gen_load_fpr_Q(dc, a->rs1);
5342 src2 = gen_load_fpr_Q(dc, a->rs2);
5343 func(src1, tcg_env, src1, src2);
5344 gen_store_fpr_Q(dc, a->rd, src1);
5345 return advance_pc(dc);
5346 }
5347
/* Quad-precision FP arithmetic. */
TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5352
5353 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5354 {
5355 TCGv_i64 src1, src2;
5356 TCGv_i128 dst;
5357
5358 if (gen_trap_ifnofpu(dc)) {
5359 return true;
5360 }
5361 if (gen_trap_float128(dc)) {
5362 return true;
5363 }
5364
5365 src1 = gen_load_fpr_D(dc, a->rs1);
5366 src2 = gen_load_fpr_D(dc, a->rs2);
5367 dst = tcg_temp_new_i128();
5368 gen_helper_fdmulq(dst, tcg_env, src1, src2);
5369 gen_store_fpr_Q(dc, a->rd, dst);
5370 return advance_pc(dc);
5371 }
5372
/*
 * FMOVR: conditional FP move based on comparing an integer register
 * against zero.  Returns false (illegal insn) for unhandled conditions.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    /* Decode the register condition first; an invalid one is an illegal insn. */
    if (!gen_compare_reg(&cond, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5392
/* SPARC64 FP conditional moves on integer register condition. */
TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5396
/*
 * FMOVcc: conditional FP move based on the integer condition codes
 * selected by a->cc.
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cond, a->cc, a->cond, dc);
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5414
/* SPARC64 FP conditional moves on integer condition codes. */
TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5418
/*
 * FMOVfcc: conditional FP move based on the floating-point condition
 * codes selected by a->cc.
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cond;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cond, a->cc, a->cond);
    func(dc, &cond, a->rd, a->rs2);
    return advance_pc(dc);
}
5436
/* SPARC64 FP conditional moves on FP condition codes. */
TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5440
/*
 * Single-precision FP compare writing fcc[a->cc].  With e set, use the
 * "compare and exception" form (fcmpes) instead of fcmps.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 s1, s2;

    /* 32-bit cpus only have %fcc0. */
    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    s1 = gen_load_fpr_F(dc, a->rs1);
    s2 = gen_load_fpr_F(dc, a->rs2);
    if (!e) {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, s1, s2);
    } else {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, s1, s2);
    }
    return advance_pc(dc);
}
5461
/* Single-precision compares: quiet (FCMPs) and signaling (FCMPEs). */
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5464
5465 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5466 {
5467 TCGv_i64 src1, src2;
5468
5469 if (avail_32(dc) && a->cc != 0) {
5470 return false;
5471 }
5472 if (gen_trap_ifnofpu(dc)) {
5473 return true;
5474 }
5475
5476 src1 = gen_load_fpr_D(dc, a->rs1);
5477 src2 = gen_load_fpr_D(dc, a->rs2);
5478 if (e) {
5479 gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5480 } else {
5481 gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5482 }
5483 return advance_pc(dc);
5484 }
5485
/* Double-precision compares: quiet (FCMPd) and signaling (FCMPEd). */
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5488
5489 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5490 {
5491 TCGv_i128 src1, src2;
5492
5493 if (avail_32(dc) && a->cc != 0) {
5494 return false;
5495 }
5496 if (gen_trap_ifnofpu(dc)) {
5497 return true;
5498 }
5499 if (gen_trap_float128(dc)) {
5500 return true;
5501 }
5502
5503 src1 = gen_load_fpr_Q(dc, a->rs1);
5504 src2 = gen_load_fpr_Q(dc, a->rs2);
5505 if (e) {
5506 gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5507 } else {
5508 gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5509 }
5510 return advance_pc(dc);
5511 }
5512
/* Quad-precision compares: quiet (FCMPq) and signaling (FCMPEq). */
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5515
5516 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5517 {
5518 TCGv_i32 src1, src2;
5519
5520 if (!avail_VIS3(dc)) {
5521 return false;
5522 }
5523 if (gen_trap_ifnofpu(dc)) {
5524 return true;
5525 }
5526
5527 src1 = gen_load_fpr_F(dc, a->rs1);
5528 src2 = gen_load_fpr_F(dc, a->rs2);
5529 gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
5530 return advance_pc(dc);
5531 }
5532
trans_FLCMPd(DisasContext * dc,arg_FLCMPd * a)5533 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5534 {
5535 TCGv_i64 src1, src2;
5536
5537 if (!avail_VIS3(dc)) {
5538 return false;
5539 }
5540 if (gen_trap_ifnofpu(dc)) {
5541 return true;
5542 }
5543
5544 src1 = gen_load_fpr_D(dc, a->rs1);
5545 src2 = gen_load_fpr_D(dc, a->rs2);
5546 gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
5547 return advance_pc(dc);
5548 }
5549
do_movf2r(DisasContext * dc,arg_r_r * a,int (* offset)(unsigned int),void (* load)(TCGv,TCGv_ptr,tcg_target_long))5550 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5551 int (*offset)(unsigned int),
5552 void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5553 {
5554 TCGv dst;
5555
5556 if (gen_trap_ifnofpu(dc)) {
5557 return true;
5558 }
5559 dst = gen_dest_gpr(dc, a->rd);
5560 load(dst, tcg_env, offset(a->rs));
5561 gen_store_gpr(dc, a->rd, dst);
5562 return advance_pc(dc);
5563 }
5564
/* VIS3B FP-to-integer register moves (sign/zero-extended 32-bit, 64-bit). */
TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5568
5569 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5570 int (*offset)(unsigned int),
5571 void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5572 {
5573 TCGv src;
5574
5575 if (gen_trap_ifnofpu(dc)) {
5576 return true;
5577 }
5578 src = gen_load_gpr(dc, a->rs);
5579 store(src, tcg_env, offset(a->rd));
5580 return advance_pc(dc);
5581 }
5582
/* VIS3B integer-to-FP register moves (32-bit and 64-bit). */
TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5585
5586 static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
5587 {
5588 DisasContext *dc = container_of(dcbase, DisasContext, base);
5589 int bound;
5590
5591 dc->pc = dc->base.pc_first;
5592 dc->npc = (target_ulong)dc->base.tb->cs_base;
5593 dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
5594 dc->def = &cpu_env(cs)->def;
5595 dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
5596 dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
5597 #ifndef CONFIG_USER_ONLY
5598 dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
5599 #endif
5600 #ifdef TARGET_SPARC64
5601 dc->fprs_dirty = 0;
5602 dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
5603 #ifndef CONFIG_USER_ONLY
5604 dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
5605 #endif
5606 #endif
5607 /*
5608 * if we reach a page boundary, we stop generation so that the
5609 * PC of a TT_TFAULT exception is always in the right page
5610 */
5611 bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
5612 dc->base.max_insns = MIN(dc->base.max_insns, bound);
5613 }
5614
/* TranslatorOps hook: nothing to do at TB start for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5618
/*
 * TranslatorOps hook: record (pc, npc) for this insn so the state can
 * be restored on exception.  A non-aligned npc encodes a symbolic value
 * (JUMP_PC / DYNAMIC_PC / DYNAMIC_PC_LOOKUP) which is canonicalized
 * before being recorded.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    target_ulong npc = ctx->npc;

    if (npc & 3) {
        if (npc == JUMP_PC) {
            /* Fold the taken branch target into the recorded npc. */
            assert(ctx->jump_pc[1] == ctx->pc + 4);
            npc = ctx->jump_pc[0] | JUMP_PC;
        } else if (npc == DYNAMIC_PC || npc == DYNAMIC_PC_LOOKUP) {
            npc = DYNAMIC_PC;
        } else {
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(ctx->pc, npc);
}
5640
/*
 * TranslatorOps hook: fetch and decode one 4-byte instruction; raise an
 * illegal-instruction trap if the decoder rejects it.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn = translator_ldl(cpu_env(cs), &ctx->base, ctx->pc);

    ctx->base.pc_next += 4;
    if (!decode(ctx, insn)) {
        gen_exception(ctx, TT_ILL_INSN);
    }

    if (ctx->base.is_jmp != DISAS_NORETURN &&
        ctx->pc != ctx->base.pc_next) {
        /* pc no longer tracks the next sequential insn: end the TB. */
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
5660
/*
 * TranslatorOps hook: emit the TB epilogue.  Chooses between direct
 * chaining, a TB lookup, or a plain exit depending on whether pc/npc
 * are statically known, then emits any delayed-exception stubs that
 * were queued during translation.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc holds a symbolic value (low bits set).
         * Materialize whatever is static, and decide whether a TB
         * lookup is still possible.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Condition not yet resolved: emit the two-way branch. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line stubs for exceptions raised in delay slots. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* Only store npc when it is statically known (4-byte aligned). */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5742
/* Hook table consumed by the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5750
/* Translate one guest TB into TCG ops via the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &ctx.base);
}
5758
/*
 * One-time TCG initialization: create the global TCG values that mirror
 * CPUSPARCState fields (condition codes, pc/npc, windowed registers).
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FPRS and the FP condition codes. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-sized globals: condition-code pieces and control state. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 reads as zero and is never a real TCG global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o/%l/%i live behind the regwptr indirection. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5824
/*
 * Restore pc/npc from the per-insn data recorded by
 * sparc_tr_insn_start() when unwinding to an instruction boundary.
 */
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored in env->npc */
    } else if (npc & JUMP_PC) {
        /*
         * Conditional branch recorded symbolically: select between the
         * taken target (high bits of npc) and the fall-through pc + 4
         * using the materialized condition.
         */
        env->npc = env->cond ? (npc & ~3) : (pc + 4);
    } else {
        env->npc = npc;
    }
}
5847