1 /*
2 SPARC translation
3
4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
6
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
11
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22
23 #include "cpu.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/translation-block.h"
31 #include "exec/log.h"
32 #include "fpu/softfloat.h"
33 #include "asi.h"
34 #include "target/sparc/translate.h"
35
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef HELPER_H
39
#ifdef TARGET_SPARC64
/*
 * Stubs for helpers that only exist in the sparc32 build.  Defining them
 * as qemu_build_not_reached() lets shared decode code reference them
 * unconditionally; the calls are eliminated as dead code.
 */
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_rdasr17(D, E) qemu_build_not_reached()
# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
/*
 * Stubs for helpers (and MAXTL_MASK) that only exist in the sparc64
 * build.  The ({ ...; NULL; }) forms stand in for helpers whose address
 * is taken rather than called directly.
 */
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_done(E) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
# define gen_helper_cmask8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK 0
#endif
105
#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
/* Condition-code state: N and V plus separate 32-bit (icc) and,
   on sparc64, 64-bit (xcc) Z and C components. */
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

/* cpu_cc_Z/cpu_cc_C name the widest Z/C pair available on this target. */
#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

/* Offsets into CPUSPARCState; the 32/64 variants assert at build time
   that the field is only referenced in the matching target build. */
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

/* A deferred comparison: cond applied to (c1, constant c2). */
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

/* An exception to be raised out-of-line in a delay slot, with the
   pc/npc of the parent insn saved for unwinding. */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))
211
/*
 * Mark the half of the FP register file containing register RD as dirty:
 * bit 0 for regs 0..31, bit 1 for regs 32..63 (presumably FPRS.DL/DU —
 * confirm against the SPARC V9 spec).  No-op on sparc32, which has no
 * FPRS register.
 */
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}
224
225 /* floating point registers moves */
226
gen_offset_fpr_F(unsigned int reg)227 static int gen_offset_fpr_F(unsigned int reg)
228 {
229 int ret;
230
231 tcg_debug_assert(reg < 32);
232 ret= offsetof(CPUSPARCState, fpr[reg / 2]);
233 if (reg & 1) {
234 ret += offsetof(CPU_DoubleU, l.lower);
235 } else {
236 ret += offsetof(CPU_DoubleU, l.upper);
237 }
238 return ret;
239 }
240
gen_load_fpr_F(DisasContext * dc,unsigned int src)241 static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
242 {
243 TCGv_i32 ret = tcg_temp_new_i32();
244 tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
245 return ret;
246 }
247
/* Store V into single-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    int ofs = gen_offset_fpr_F(dst);

    tcg_gen_st_i32(v, tcg_env, ofs);
    gen_update_fprs_dirty(dc, dst);
}
253
gen_offset_fpr_D(unsigned int reg)254 static int gen_offset_fpr_D(unsigned int reg)
255 {
256 tcg_debug_assert(reg < 64);
257 tcg_debug_assert(reg % 2 == 0);
258 return offsetof(CPUSPARCState, fpr[reg / 2]);
259 }
260
gen_load_fpr_D(DisasContext * dc,unsigned int src)261 static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
262 {
263 TCGv_i64 ret = tcg_temp_new_i64();
264 tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
265 return ret;
266 }
267
/* Store V into double-precision FP register DST and mark FPRS dirty. */
static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    int ofs = gen_offset_fpr_D(dst);

    tcg_gen_st_i64(v, tcg_env, ofs);
    gen_update_fprs_dirty(dc, dst);
}
273
gen_load_fpr_Q(DisasContext * dc,unsigned int src)274 static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
275 {
276 TCGv_i128 ret = tcg_temp_new_i128();
277 TCGv_i64 h = gen_load_fpr_D(dc, src);
278 TCGv_i64 l = gen_load_fpr_D(dc, src + 2);
279
280 tcg_gen_concat_i64_i128(ret, l, h);
281 return ret;
282 }
283
/*
 * Store the 128-bit value V into quad-precision FP register DST,
 * splitting it across the two backing doubles (DST = high half,
 * DST + 2 = low half).
 */
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(lo, hi, v);
    gen_store_fpr_D(dc, dst, hi);
    gen_store_fpr_D(dc, dst + 2, lo);
}
293
/* moves */
/* Current privilege level, folded to constants where the configuration
   makes the answer fixed (user-only emulation is never privileged). */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

/* AM_CHECK: whether addresses must be truncated to 32 bits (sparc64
   address masking; presumably PSTATE.AM — confirm against the V9 spec). */
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
317
/* Truncate ADDR to 32 bits in place when address masking is active. */
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (!AM_CHECK(dc)) {
        return;
    }
    tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
}
324
/* Compile-time version of gen_address_mask for immediate addresses. */
static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    if (AM_CHECK(dc)) {
        addr = (uint32_t)addr;
    }
    return addr;
}
329
gen_load_gpr(DisasContext * dc,int reg)330 static TCGv gen_load_gpr(DisasContext *dc, int reg)
331 {
332 if (reg > 0) {
333 assert(reg < 32);
334 return cpu_regs[reg];
335 } else {
336 TCGv t = tcg_temp_new();
337 tcg_gen_movi_tl(t, 0);
338 return t;
339 }
340 }
341
/* Copy V into general register REG; writes to %g0 are discarded. */
static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg <= 0) {
        return;
    }
    assert(reg < 32);
    tcg_gen_mov_tl(cpu_regs[reg], v);
}
349
gen_dest_gpr(DisasContext * dc,int reg)350 static TCGv gen_dest_gpr(DisasContext *dc, int reg)
351 {
352 if (reg > 0) {
353 assert(reg < 32);
354 return cpu_regs[reg];
355 } else {
356 return tcg_temp_new();
357 }
358 }
359
/* True if both branch targets permit direct TB chaining. */
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    if (!translator_use_goto_tb(&s->base, pc)) {
        return false;
    }
    return translator_use_goto_tb(&s->base, npc);
}
365
/*
 * Emit the end-of-TB jump to (pc, npc): chain directly to the next TB
 * when both targets allow it, otherwise set pc/npc and fall back to a
 * dynamic TB lookup.
 */
static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
382
/*
 * Return the 32-bit (icc) carry as a 0/1 value.  On a 64-bit target the
 * icc carry lives in bit 32 of cpu_icc_C (see gen_op_addcc_int), so
 * extract it into a temporary; on a 32-bit target cpu_icc_C holds it
 * directly.
 */
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
392
/*
 * Emit DST = SRC1 + SRC2 (+ CIN), updating all condition-code state:
 * cc_N gets the full result, cc_C the carry-out from the widened add,
 * cc_V the signed-overflow term (result^src2) & ~(src1^src2), and cc_Z
 * a copy of the result for lazy zero testing.  CIN may be NULL when
 * there is no carry-in.
 */
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        /* Two cascaded double-width adds so the carry-in is folded
           into the final carry-out. */
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* Z temporarily holds src1 ^ src2, a sub-term of the V computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
417
/* ADDcc: add and set condition codes. */
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

/* TADDcc: tagged add — like ADDcc but any set tag bits (low two bits
   of either operand) also force icc.V. */
static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: nonzero tags become an all-ones
       32-bit mask OR-ed into V. */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* ADDC: add with the 32-bit (icc) carry, no condition-code update. */
static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

/* ADDCcc: add with 32-bit carry, setting condition codes. */
static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

/* ADDXC: add with the full-width (xcc) carry, no cc update. */
static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

/* ADDXCcc: add with full-width carry, setting condition codes. */
static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}
460
/*
 * Emit DST = SRC1 - SRC2 (- CIN), updating all condition-code state,
 * mirroring gen_op_addcc_int above: cc_N gets the result, cc_C the
 * borrow-out normalized to 0/1, cc_V the signed-overflow term
 * (result^src1) & (src1^src2), and cc_Z a copy of the result.
 * CIN may be NULL when there is no borrow-in.
 */
static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    /* The double-width subtract leaves -1/0 in the high word on borrow;
       negate to get the 1/0 carry flag. */
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    /* Z temporarily holds src1 ^ src2, a sub-term of the V computation. */
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    /*
     * Use the same TARGET_LONG_BITS compile-time test as
     * gen_op_addcc_int, rather than #ifdef TARGET_SPARC64, for
     * consistency; the generated code is identical.
     */
    if (TARGET_LONG_BITS == 64) {
        /* Borrow-in to bit 32 is result ^ src1 ^ src2. */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
482
/* SUBcc: subtract and set condition codes. */
static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

/* TSUBcc: tagged subtract — like SUBcc but any set tag bits (low two
   bits of either operand) also force icc.V. */
static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V: nonzero tags become an all-ones
       32-bit mask OR-ed into V. */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

/* SUBC: subtract with the 32-bit (icc) carry, no cc update. */
static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

/* SUBCcc: subtract with 32-bit carry, setting condition codes. */
static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

/* SUBXC: subtract with the full-width (xcc) carry, no cc update. */
static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

/* SUBXCcc: subtract with full-width carry, setting condition codes. */
static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}
525
/*
 * MULScc: one step of the V8 multiply-step instruction.  Conditionally
 * zeroes src2 on Y's low bit, shifts Y and src1 right by one (inserting
 * src1's low bit into Y and N^V at the top of src1), then adds with
 * full condition-code update.
 */
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
562
/*
 * 32x32->64 multiply for UMUL/SMUL: DST receives the product (the full
 * 64 bits on a 64-bit target, the low 32 on a 32-bit target) and the Y
 * register receives the high 32 bits.  sign_ext selects signed vs
 * unsigned extension of the 32-bit operands.
 */
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}
587
/* UMUL: unsigned 32x32->64 multiply, high half to Y. */
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

/* SMUL: signed 32x32->64 multiply, high half to Y. */
static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

/* UMULXHI: high 64 bits of the unsigned 64x64 product; the low half
   from mulu2 is discarded. */
static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}
605
/* FPMADDX: dst = src3 + (src1 * src2), low 64 bits only. */
static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

/* FPMADDXHI: dst = high 64 bits of (src1 * src2 + src3), computed via
   a full 128-bit multiply-add; the low half is discarded. */
static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}
625
/*
 * SDIV: signed divide via helper.  The helper produces a 64-bit value;
 * only the sign-extended low 32 bits reach dst here (the high bits are
 * consumed by the cc-setting variant below).
 */
static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}
637
/*
 * UDIVcc: unsigned divide with condition codes.  The helper packs the
 * quotient in the low 32 bits of a 64-bit result and overflow state in
 * the high bits (see helper_udiv); split those into cc_N and cc_V,
 * mirror N into Z, and clear both carries.
 */
static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    /* On sparc64 TCGv is 64-bit, so cc_V can receive the helper result
       directly and be split in place. */
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
662
/*
 * SDIVcc: signed divide with condition codes.  Same structure as
 * gen_op_udivcc above, but the 32-bit quotient is sign-extended
 * into cc_N.
 */
static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
687
/* TADDccTV: tagged add, trapping on overflow — delegated to a helper
   since it may raise an exception. */
static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

/* TSUBccTV: tagged subtract, trapping on overflow. */
static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

/* POPC: population count of src2 (src1 is unused by the insn). */
static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

/* LZCNT: count leading zeros; an all-zero input yields the full width. */
static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
707
#ifndef TARGET_SPARC64
/* sparc32 stub: the ARRAY insns are sparc64 (VIS) only and the decoder
   must never reach this. */
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

/* ARRAY16: the 8-bit array address scaled by 2. */
static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

/* ARRAY32: the 8-bit array address scaled by 4. */
static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}
726
/* FPACK16 (VIS): helper call using GSR.scale; sparc64 only. */
static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACKFIX (VIS): helper call using GSR.scale; sparc64 only. */
static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

/* FPACK32 (VIS): helper call using GSR.scale; sparc64 only. */
static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
753
/*
 * Saturating signed add of the two 16-bit lanes of a 32-bit value:
 * each lane is sign-extended, added, clamped to [INT16_MIN, INT16_MAX],
 * and the lanes re-packed.
 */
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        /* Clamp the 17-bit sum into signed 16-bit range. */
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

/* Saturating signed subtract of the two 16-bit lanes; see
   gen_op_fpadds16s for the lane handling. */
static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}
789
/*
 * Saturating signed 32-bit add: compute r = s1 + s2, detect signed
 * overflow as the sign bit of (r^s2) & ~(s1^s2), and on overflow select
 * INT32_MAX or INT32_MIN according to the sign of the wrapped result.
 */
static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    /* t = r >= 0 ? INT32_MIN : INT32_MAX — the saturation value has
       the opposite sign of the wrapped result. */
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

/*
 * Saturating signed 32-bit subtract: overflow test uses
 * (r^s1) & (s1^s2); otherwise identical in structure to
 * gen_op_fpadds32s above.
 */
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
825
/*
 * FALIGNDATA: from the 16-byte concatenation s1:s2, extract the 8 bytes
 * starting at the byte offset held in the low 3 bits of GSR — i.e.
 * dst = (s1 << 8*align) | (s2 >> (64 - 8*align)).
 */
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    /* Byte offset from GSR.align, converted to a bit count. */
    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

/* FALIGNDATA using the architectural GSR register as the offset source. */
static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}
858
/* BSHUFFLE (VIS): byte shuffle controlled by GSR.mask; sparc64 only. */
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

/* PDISTN: pixel distance with a zero accumulator (vs. PDIST, which
   accumulates into the destination); sparc64 only. */
static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}
876
/* FMUL8x16AL: fmul8x16a using the low 16 bits of src2 as the scalar.
   Note that src2 is clobbered in place before the helper call. */
static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/* FMUL8x16AU: fmul8x16a using the high 16 bits of src2 as the scalar.
   Note that src2 is clobbered in place before the helper call. */
static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

/*
 * FMULD8ULX16: multiply the unsigned low byte of each 16-bit half of
 * src1 by the corresponding signed 16-bit half of src2, packing the two
 * 32-bit products into the 64-bit result.
 */
static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

/* FMULD8SUX16: as above but using the signed high byte of each 16-bit
   half of src1, with the result pre-shifted left by 8. */
static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
930
931 #ifdef TARGET_SPARC64
gen_vec_fchksm16(unsigned vece,TCGv_vec dst,TCGv_vec src1,TCGv_vec src2)932 static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
933 TCGv_vec src1, TCGv_vec src2)
934 {
935 TCGv_vec a = tcg_temp_new_vec_matching(dst);
936 TCGv_vec c = tcg_temp_new_vec_matching(dst);
937
938 tcg_gen_add_vec(vece, a, src1, src2);
939 tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
940 /* Vector cmp produces -1 for true, so subtract to add carry. */
941 tcg_gen_sub_vec(vece, dst, a, c);
942 }
943
gen_op_fchksm16(unsigned vece,uint32_t dofs,uint32_t aofs,uint32_t bofs,uint32_t oprsz,uint32_t maxsz)944 static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
945 uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
946 {
947 static const TCGOpcode vecop_list[] = {
948 INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
949 };
950 static const GVecGen3 op = {
951 .fni8 = gen_helper_fchksm16,
952 .fniv = gen_vec_fchksm16,
953 .opt_opc = vecop_list,
954 .vece = MO_16,
955 };
956 tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
957 }
958
static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec lsb = tcg_temp_new_vec_matching(dst);

    /*
     * Average as (src1 >> 1) + (src2 >> 1) + ((src1 | src2) & 1).
     * The low-bit term must be captured before the inputs are
     * shifted, since the shifts below overwrite src1/src2 in place.
     */
    tcg_gen_or_vec(vece, lsb, src1, src2);
    tcg_gen_and_vec(vece, lsb, lsb, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, lsb);
}
971
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    /* Vector opcodes that gen_vec_fmean16 requires from the backend. */
    static const TCGOpcode vecops[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    /* Expand with the vector path when possible, i64 helper otherwise. */
    static const GVecGen3 gen = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecops,
        .vece = MO_16,
    };

    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &gen);
}
#else
/* These VIS expanders exist only on sparc64; trap any sparc32 build use. */
#define gen_op_fchksm16 ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16 ({ qemu_build_not_reached(); NULL; })
#endif
990
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (!dc->cpu_cond_live) {
        return;
    }
    tcg_gen_discard_tl(cpu_cond);
    dc->cpu_cond_live = false;
}
1003
static void gen_generic_branch(DisasContext *dc)
{
    /* Resolve the pending conditional branch: select between the two
       recorded targets based on the saved comparison. */
    TCGv on_true = tcg_constant_tl(dc->jump_pc[0]);
    TCGv on_false = tcg_constant_tl(dc->jump_pc[1]);
    TCGv cmp2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, cmp2,
                       on_true, on_false);
}
1012
1013 /* call this function before using the condition register as it may
1014 have been set for a jump */
flush_cond(DisasContext * dc)1015 static void flush_cond(DisasContext *dc)
1016 {
1017 if (dc->npc == JUMP_PC) {
1018 gen_generic_branch(dc);
1019 dc->npc = DYNAMIC_PC_LOOKUP;
1020 }
1021 }
1022
/* Materialize dc->npc into cpu_npc. */
static void save_npc(DisasContext *dc)
{
    if ((dc->npc & 3) == 0) {
        /* Concrete address known at translation time. */
        tcg_gen_movi_tl(cpu_npc, dc->npc);
        return;
    }

    switch (dc->npc) {
    case JUMP_PC:
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
        break;
    case DYNAMIC_PC:
    case DYNAMIC_PC_LOOKUP:
        /* cpu_npc already holds the runtime value. */
        break;
    default:
        g_assert_not_reached();
    }
}
1041
/* Write the translation-time pc/npc back into the CPU state registers. */
static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}
1047
/*
 * Raise exception WHICH at the current insn.  Synchronizes pc/npc
 * first so the handler sees precise state, then ends the TB.
 */
static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}
1055
delay_exceptionv(DisasContext * dc,TCGv_i32 excp)1056 static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
1057 {
1058 DisasDelayException *e = g_new0(DisasDelayException, 1);
1059
1060 e->next = dc->delay_excp_list;
1061 dc->delay_excp_list = e;
1062
1063 e->lab = gen_new_label();
1064 e->excp = excp;
1065 e->pc = dc->pc;
1066 /* Caller must have used flush_cond before branch. */
1067 assert(e->npc != JUMP_PC);
1068 e->npc = dc->npc;
1069
1070 return e->lab;
1071 }
1072
/* As delay_exceptionv, but with an immediate exception number. */
static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
1077
/* Branch to a delayed TT_UNALIGNED exception if (addr & mask) != 0. */
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv masked = tcg_temp_new();
    TCGLabel *excp_lab;

    tcg_gen_andi_tl(masked, addr, mask);

    /* flush_cond must precede delay_exception and the branch. */
    flush_cond(dc);
    excp_lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, masked, 0, excp_lab);
}
1089
/*
 * Advance pc to npc (the delay-slot step).  Handles both concrete
 * npc values and the symbolic JUMP_PC / DYNAMIC_PC* markers.
 */
static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            /* Resolve the pending conditional branch into cpu_npc first. */
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Concrete npc: no TCG code needed, just track it. */
        dc->pc = dc->npc;
    }
}
1113
/*
 * Build a DisasCompare for integer condition COND against the icc
 * (xcc == false) or xcc (xcc == true) condition codes, which are kept
 * unpacked in cpu_cc_{N,Z,V,C} (and cpu_icc_{Z,C} for sparc64 icc).
 */
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    /* Low 3 bits select the base condition; bit 3 inverts it below. */
    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            /* The 32-bit carry lives in bit 32 of cpu_icc_C. */
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
1212
/* Build a DisasCompare for float condition COND against fcc[CC]. */
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        /* Both values have bit 0 set. */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    /* Widen the 32-bit fcc comparison value to target width. */
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
1272
gen_compare_reg(DisasCompare * cmp,int cond,TCGv r_src)1273 static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
1274 {
1275 static const TCGCond cond_reg[4] = {
1276 TCG_COND_NEVER, /* reserved */
1277 TCG_COND_EQ,
1278 TCG_COND_LE,
1279 TCG_COND_LT,
1280 };
1281 TCGCond tcond;
1282
1283 if ((cond & 3) == 0) {
1284 return false;
1285 }
1286 tcond = cond_reg[cond & 3];
1287 if (cond & 4) {
1288 tcond = tcg_invert_cond(tcond);
1289 }
1290
1291 cmp->cond = tcond;
1292 cmp->c1 = tcg_temp_new();
1293 cmp->c2 = 0;
1294 tcg_gen_mov_tl(cmp->c1, r_src);
1295 return true;
1296 }
1297
/* Zero FSR.CEXC and FSR.FTT, as on a successfully completed FPop. */
static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
1303
/* fmovs: pure bit move; still clears CEXC/FTT like any completed FPop. */
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

/* fnegs: flip the single-precision sign bit (bit 31). */
static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

/* fabss: clear the single-precision sign bit. */
static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

/* fmovd: double-precision register move. */
static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

/* fnegd: flip the double-precision sign bit (bit 63). */
static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

/* fabsd: clear the double-precision sign bit. */
static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}
1339
/*
 * fnegq: flip the sign bit in the high half of the 128-bit value.
 * NOTE(review): unlike the 32/64-bit variants above, the quad forms do
 * not call gen_op_clear_ieee_excp_and_FTT -- confirm that is intended.
 */
static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

/* fabsq: clear the sign bit in the high half of the 128-bit value. */
static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}
1359
/*
 * Fused multiply-add expanders: d = op(s1 * s2 + s3) in one rounding.
 * The trailing two helper arguments are an extra operand (zero here,
 * used by the fhadd/fhsub variants below) and the float_muladd_*
 * negation flags selecting the msub/nmsub/nmadd forms.
 */
static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}

/* fmsub: negate the addend (s1 * s2 - s3). */
static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

/* fnmsub: negate both addend and result (-(s1 * s2 - s3)). */
static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

/* fnmadd: negate the result (-(s1 * s2 + s3)). */
static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}
1415
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
/* The -1 passed in place of the usual zero operand provides the /2. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}
1466
/* Raise TT_FP_EXCP with the given FSR.FTT trap-type already stored. */
static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}
1478
/*
 * Raise TT_NFPU_INSN if the FPU is disabled; returns true if trapped.
 * User-only builds always have the FPU enabled.
 */
static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}
1489
/* Raise a sequence_error fp trap if the FQ is non-empty (sparc32). */
static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, any FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}
1517
gen_trap_if_nofpu_fpexception(DisasContext * dc)1518 static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
1519 {
1520 return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
1521 }
1522
/* asi moves */

/* How an ASI access should be code-generated, as decided by resolve_asi. */
typedef enum {
    GET_ASI_HELPER,   /* default: go through the out-of-line asi helpers */
    GET_ASI_EXCP,     /* an exception was already raised; emit nothing */
    GET_ASI_DIRECT,   /* plain qemu_ld/st with the chosen mem_idx */
    GET_ASI_DTWINX,   /* twin-doubleword / quad-ldd asis */
    GET_ASI_CODE,     /* instruction-space access (sparc32 system mode) */
    GET_ASI_BLOCK,    /* 64-byte block load/store asis */
    GET_ASI_SHORT,    /* FL8/FL16 short floating-point asis */
    GET_ASI_BCOPY,    /* sparc32 ASI_M_BCOPY block copy */
    GET_ASI_BFILL,    /* sparc32 ASI_M_BFILL block fill */
} ASIType;

typedef struct {
    ASIType type;
    int asi;       /* raw asi number (or the -1/-2 pseudo values) */
    int mem_idx;   /* MMU index to use for the access */
    MemOp memop;   /* size/endianness of the access */
} DisasASI;
1542
1543 /*
1544 * Build DisasASI.
1545 * For asi == -1, treat as non-asi.
1546 * For ask == -2, treat as immediate offset (v8 error, v9 %asi).
1547 */
resolve_asi(DisasContext * dc,int asi,MemOp memop)1548 static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
1549 {
1550 ASIType type = GET_ASI_HELPER;
1551 int mem_idx = dc->mem_idx;
1552
1553 if (asi == -1) {
1554 /* Artificial "non-asi" case. */
1555 type = GET_ASI_DIRECT;
1556 goto done;
1557 }
1558
1559 #ifndef TARGET_SPARC64
1560 /* Before v9, all asis are immediate and privileged. */
1561 if (asi < 0) {
1562 gen_exception(dc, TT_ILL_INSN);
1563 type = GET_ASI_EXCP;
1564 } else if (supervisor(dc)
1565 /* Note that LEON accepts ASI_USERDATA in user mode, for
1566 use with CASA. Also note that previous versions of
1567 QEMU allowed (and old versions of gcc emitted) ASI_P
1568 for LEON, which is incorrect. */
1569 || (asi == ASI_USERDATA
1570 && (dc->def->features & CPU_FEATURE_CASA))) {
1571 switch (asi) {
1572 case ASI_USERDATA: /* User data access */
1573 mem_idx = MMU_USER_IDX;
1574 type = GET_ASI_DIRECT;
1575 break;
1576 case ASI_KERNELDATA: /* Supervisor data access */
1577 mem_idx = MMU_KERNEL_IDX;
1578 type = GET_ASI_DIRECT;
1579 break;
1580 case ASI_USERTXT: /* User text access */
1581 mem_idx = MMU_USER_IDX;
1582 type = GET_ASI_CODE;
1583 break;
1584 case ASI_KERNELTXT: /* Supervisor text access */
1585 mem_idx = MMU_KERNEL_IDX;
1586 type = GET_ASI_CODE;
1587 break;
1588 case ASI_M_BYPASS: /* MMU passthrough */
1589 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1590 mem_idx = MMU_PHYS_IDX;
1591 type = GET_ASI_DIRECT;
1592 break;
1593 case ASI_M_BCOPY: /* Block copy, sta access */
1594 mem_idx = MMU_KERNEL_IDX;
1595 type = GET_ASI_BCOPY;
1596 break;
1597 case ASI_M_BFILL: /* Block fill, stda access */
1598 mem_idx = MMU_KERNEL_IDX;
1599 type = GET_ASI_BFILL;
1600 break;
1601 }
1602
1603 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1604 * permissions check in get_physical_address(..).
1605 */
1606 mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
1607 } else {
1608 gen_exception(dc, TT_PRIV_INSN);
1609 type = GET_ASI_EXCP;
1610 }
1611 #else
1612 if (asi < 0) {
1613 asi = dc->asi;
1614 }
1615 /* With v9, all asis below 0x80 are privileged. */
1616 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1617 down that bit into DisasContext. For the moment that's ok,
1618 since the direct implementations below doesn't have any ASIs
1619 in the restricted [0x30, 0x7f] range, and the check will be
1620 done properly in the helper. */
1621 if (!supervisor(dc) && asi < 0x80) {
1622 gen_exception(dc, TT_PRIV_ACT);
1623 type = GET_ASI_EXCP;
1624 } else {
1625 switch (asi) {
1626 case ASI_REAL: /* Bypass */
1627 case ASI_REAL_IO: /* Bypass, non-cacheable */
1628 case ASI_REAL_L: /* Bypass LE */
1629 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1630 case ASI_TWINX_REAL: /* Real address, twinx */
1631 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1632 case ASI_QUAD_LDD_PHYS:
1633 case ASI_QUAD_LDD_PHYS_L:
1634 mem_idx = MMU_PHYS_IDX;
1635 break;
1636 case ASI_N: /* Nucleus */
1637 case ASI_NL: /* Nucleus LE */
1638 case ASI_TWINX_N:
1639 case ASI_TWINX_NL:
1640 case ASI_NUCLEUS_QUAD_LDD:
1641 case ASI_NUCLEUS_QUAD_LDD_L:
1642 if (hypervisor(dc)) {
1643 mem_idx = MMU_PHYS_IDX;
1644 } else {
1645 mem_idx = MMU_NUCLEUS_IDX;
1646 }
1647 break;
1648 case ASI_AIUP: /* As if user primary */
1649 case ASI_AIUPL: /* As if user primary LE */
1650 case ASI_TWINX_AIUP:
1651 case ASI_TWINX_AIUP_L:
1652 case ASI_BLK_AIUP_4V:
1653 case ASI_BLK_AIUP_L_4V:
1654 case ASI_BLK_AIUP:
1655 case ASI_BLK_AIUPL:
1656 case ASI_MON_AIUP:
1657 mem_idx = MMU_USER_IDX;
1658 break;
1659 case ASI_AIUS: /* As if user secondary */
1660 case ASI_AIUSL: /* As if user secondary LE */
1661 case ASI_TWINX_AIUS:
1662 case ASI_TWINX_AIUS_L:
1663 case ASI_BLK_AIUS_4V:
1664 case ASI_BLK_AIUS_L_4V:
1665 case ASI_BLK_AIUS:
1666 case ASI_BLK_AIUSL:
1667 case ASI_MON_AIUS:
1668 mem_idx = MMU_USER_SECONDARY_IDX;
1669 break;
1670 case ASI_S: /* Secondary */
1671 case ASI_SL: /* Secondary LE */
1672 case ASI_TWINX_S:
1673 case ASI_TWINX_SL:
1674 case ASI_BLK_COMMIT_S:
1675 case ASI_BLK_S:
1676 case ASI_BLK_SL:
1677 case ASI_FL8_S:
1678 case ASI_FL8_SL:
1679 case ASI_FL16_S:
1680 case ASI_FL16_SL:
1681 case ASI_MON_S:
1682 if (mem_idx == MMU_USER_IDX) {
1683 mem_idx = MMU_USER_SECONDARY_IDX;
1684 } else if (mem_idx == MMU_KERNEL_IDX) {
1685 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1686 }
1687 break;
1688 case ASI_P: /* Primary */
1689 case ASI_PL: /* Primary LE */
1690 case ASI_TWINX_P:
1691 case ASI_TWINX_PL:
1692 case ASI_BLK_COMMIT_P:
1693 case ASI_BLK_P:
1694 case ASI_BLK_PL:
1695 case ASI_FL8_P:
1696 case ASI_FL8_PL:
1697 case ASI_FL16_P:
1698 case ASI_FL16_PL:
1699 case ASI_MON_P:
1700 break;
1701 }
1702 switch (asi) {
1703 case ASI_REAL:
1704 case ASI_REAL_IO:
1705 case ASI_REAL_L:
1706 case ASI_REAL_IO_L:
1707 case ASI_N:
1708 case ASI_NL:
1709 case ASI_AIUP:
1710 case ASI_AIUPL:
1711 case ASI_AIUS:
1712 case ASI_AIUSL:
1713 case ASI_S:
1714 case ASI_SL:
1715 case ASI_P:
1716 case ASI_PL:
1717 case ASI_MON_P:
1718 case ASI_MON_S:
1719 case ASI_MON_AIUP:
1720 case ASI_MON_AIUS:
1721 type = GET_ASI_DIRECT;
1722 break;
1723 case ASI_TWINX_REAL:
1724 case ASI_TWINX_REAL_L:
1725 case ASI_TWINX_N:
1726 case ASI_TWINX_NL:
1727 case ASI_TWINX_AIUP:
1728 case ASI_TWINX_AIUP_L:
1729 case ASI_TWINX_AIUS:
1730 case ASI_TWINX_AIUS_L:
1731 case ASI_TWINX_P:
1732 case ASI_TWINX_PL:
1733 case ASI_TWINX_S:
1734 case ASI_TWINX_SL:
1735 case ASI_QUAD_LDD_PHYS:
1736 case ASI_QUAD_LDD_PHYS_L:
1737 case ASI_NUCLEUS_QUAD_LDD:
1738 case ASI_NUCLEUS_QUAD_LDD_L:
1739 type = GET_ASI_DTWINX;
1740 break;
1741 case ASI_BLK_COMMIT_P:
1742 case ASI_BLK_COMMIT_S:
1743 case ASI_BLK_AIUP_4V:
1744 case ASI_BLK_AIUP_L_4V:
1745 case ASI_BLK_AIUP:
1746 case ASI_BLK_AIUPL:
1747 case ASI_BLK_AIUS_4V:
1748 case ASI_BLK_AIUS_L_4V:
1749 case ASI_BLK_AIUS:
1750 case ASI_BLK_AIUSL:
1751 case ASI_BLK_S:
1752 case ASI_BLK_SL:
1753 case ASI_BLK_P:
1754 case ASI_BLK_PL:
1755 type = GET_ASI_BLOCK;
1756 break;
1757 case ASI_FL8_S:
1758 case ASI_FL8_SL:
1759 case ASI_FL8_P:
1760 case ASI_FL8_PL:
1761 memop = MO_UB;
1762 type = GET_ASI_SHORT;
1763 break;
1764 case ASI_FL16_S:
1765 case ASI_FL16_SL:
1766 case ASI_FL16_P:
1767 case ASI_FL16_PL:
1768 memop = MO_TEUW;
1769 type = GET_ASI_SHORT;
1770 break;
1771 }
1772 /* The little-endian asis all have bit 3 set. */
1773 if (asi & 8) {
1774 memop ^= MO_BSWAP;
1775 }
1776 }
1777 #endif
1778
1779 done:
1780 return (DisasASI){ type, asi, mem_idx, memop };
1781 }
1782
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
/*
 * Stubs for sparc32 user-only builds: resolve_asi never selects
 * GET_ASI_HELPER there, so these paths must be unreachable.
 */
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif
1796
/* Emit an ASI load of DST from ADDR, dispatching on the resolved DA. */
static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* Synchronize pc/npc: the helper may raise an exception. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                /* The 32-bit helper returns 64 bits; truncate to target. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
1842
/* Emit an ASI store of SRC to ADDR, dispatching on the resolved DA. */
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            /* Truncate both addresses to a 32-byte boundary. */
            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            /* Synchronize pc/npc: the helper may raise an exception. */
            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                /* The 32-bit helper takes a 64-bit value; widen. */
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1921
/* Emit an ASI atomic exchange: DST <- [ADDR], [ADDR] <- SRC. */
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    if (da->type == GET_ASI_EXCP) {
        return;
    }
    if (da->type == GET_ASI_DIRECT) {
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        return;
    }
    /* ??? Should be DAE_invalid_asi. */
    gen_exception(dc, TT_DATA_ACCESS);
}
1938
/* Emit an ASI compare-and-swap of [ADDR] against CMPV with NEWV. */
static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    if (da->type == GET_ASI_EXCP) {
        return;
    }
    if (da->type == GET_ASI_DIRECT) {
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        return;
    }
    /* ??? Should be DAE_invalid_asi. */
    gen_exception(dc, TT_DATA_ACCESS);
}
1955
/* Emit an ASI ldstub: DST <- [ADDR] (byte), [ADDR] <- 0xff, atomically. */
static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            /* Cannot emulate the non-atomic helper pair under MTTCG;
               fall back to exclusive single-threaded execution. */
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            /* Synchronize pc/npc: the helpers may raise an exception. */
            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
1990
/*
 * Generate code for a floating-point load with an alternate ASI
 * (ldfa/lddfa/ldqfa).  ORIG_SIZE is the size encoded in the insn;
 * RD is the already-decoded FP register number.
 */
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Split quad accesses into two 64-bit loads for now. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI resolution. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            /* Two 64-bit loads: high half at ADDR, low half at ADDR+8. */
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            for (int i = 0; ; ++i) {
                /* Load 8 doubles (64 bytes) into rd .. rd+14. */
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them. */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                /* Two helper calls, as with GET_ASI_DIRECT above. */
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
2111
/*
 * Generate code for a floating-point store with an alternate ASI
 * (stfa/stdfa/stqfa).  ORIG_SIZE is the size encoded in the insn;
 * RD is the already-decoded FP register number.
 */
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        /* Split quad accesses into two 64-bit stores for now. */
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI resolution. */
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                /* Store 8 doubles (64 bytes) from rd .. rd+14. */
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for ldfa/lddfa/ldqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
2197
/*
 * Generate code for LDDA: load a doubleword into the even/odd register
 * pair rd/rd+1 (or a twin-doubleword on sparc64), honoring the ASI in DA.
 */
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised; skip the writeback at the bottom. */
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
2293
/*
 * Generate code for STDA: store the even/odd register pair rd/rd+1
 * as a doubleword (or a twin-doubleword on sparc64), honoring the
 * ASI in DA.
 */
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        /* Exception already raised during ASI resolution. */
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_COPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            /* Replicate the 8-byte pair to 16 bytes, then store it twice
               to the 32-byte-aligned block containing ADDR. */
            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
2384
/*
 * Conditional move of a single-precision FP register:
 * if CMP holds, copy %f[rs] to %f[rd]; otherwise leave %f[rd] unchanged.
 */
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    /* dst = (c32 != 0) ? s1 : s2, i.e. keep the old value when false. */
    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2410
/*
 * Conditional move of a double-precision FP register:
 * if CMP holds, copy %d[rs] to %d[rd]; otherwise leave %d[rd] unchanged.
 */
static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = tcg_temp_new_i64();
    /* Select the source when the comparison holds, else the old value. */
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}
2423
/*
 * Conditional move of a quad-precision FP register, implemented as two
 * 64-bit conditional moves over the high and low halves.
 */
static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
2443
#ifdef TARGET_SPARC64
/*
 * Compute a pointer to the trap_state for the current trap level:
 * r_tsptr = &env->ts[env->tl & MAXTL_MASK].
 */
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif
2467
/*
 * Decode a double-precision FP register number from its 5-bit insn field.
 * The low bit is forced even; on sparc64 that bit instead selects the
 * upper register bank (registers 32..62).
 */
static int extract_dfpreg(DisasContext *dc, int x)
{
    int reg = x & 0x1e;
#ifdef TARGET_SPARC64
    reg |= (x & 1) << 5;
#endif
    return reg;
}
2476
/*
 * Decode a quad-precision FP register number from its 5-bit insn field.
 * The low two bits are forced to zero; on sparc64 the insn's low bit
 * instead selects the upper register bank.
 */
static int extract_qfpreg(DisasContext *dc, int x)
{
    int reg = x & 0x1c;
#ifdef TARGET_SPARC64
    reg |= (x & 1) << 5;
#endif
    return reg;
}
2485
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/*
 * Define a trans_NAME function for the decoder: the insn is accepted
 * only when the AVAIL feature predicate holds, then FUNC performs the
 * actual translation.
 */
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

/* Feature predicates used by TRANS: constant per build for the 32/64
   split, otherwise tested against the cpu definition's feature bits. */
#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif
2529
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    finishing_insn(dc);

    if (dc->npc & 3) {
        /* npc is one of the special DYNAMIC_PC/JUMP_PC markers rather
           than a real (4-byte aligned) address. */
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* npc is only known at runtime: advance pc <- npc, npc += 4
               in the generated code. */
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;

        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;

        default:
            g_assert_not_reached();
        }
    } else {
        /* Both pc and npc known statically: just step forward. */
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
2570
2571 /*
2572 * Major opcodes 00 and 01 -- branches, call, and sethi
2573 */
2574
/*
 * Advance pc/npc for a conditional branch with condition CMP,
 * annul bit ANNUL, and word displacement DISP from the current pc.
 */
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc;

    finishing_insn(dc);

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            /* Branch-always with annul: the delay slot is skipped. */
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            /* Execute the delay slot, then the target. */
            gen_mov_pc_npc(dc);
            dc->npc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        /* Branch-never: fall through, optionally annulling the slot. */
        npc = dc->npc;
        if (npc & 3) {
            /* npc only known at runtime; adjust in generated code. */
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    flush_cond(dc);
    npc = dc->npc;

    if (annul) {
        /* Annulled conditional branch: the delay slot executes only
           when the branch is taken, so split into two TB exits now. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else {
        if (npc & 3) {
            switch (npc) {
            case DYNAMIC_PC:
            case DYNAMIC_PC_LOOKUP:
                /* Resolve the branch at runtime with a movcond on npc. */
                tcg_gen_mov_tl(cpu_pc, cpu_npc);
                tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
                tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                                   cmp->c1, tcg_constant_tl(cmp->c2),
                                   tcg_constant_tl(dest), cpu_npc);
                dc->pc = npc;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Defer the branch decision until after the delay slot. */
            dc->pc = npc;
            dc->npc = JUMP_PC;
            dc->jump = *cmp;
            dc->jump_pc[0] = dest;
            dc->jump_pc[1] = npc + 4;

            /* The condition for cpu_cond is always NE -- normalize. */
            if (cmp->cond == TCG_COND_NE) {
                tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
            } else {
                tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
            }
            dc->cpu_cond_live = true;
        }
    }
    return true;
}
2654
/* Raise a privileged-instruction trap; always "handles" the insn. */
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}
2660
/* Raise an fp_exception with FTT set to unimplemented-FPop. */
static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}
2666
gen_trap_float128(DisasContext * dc)2667 static bool gen_trap_float128(DisasContext *dc)
2668 {
2669 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2670 return false;
2671 }
2672 return raise_unimpfpop(dc);
2673 }
2674
/* Translate Bicc/BPcc: branch on integer condition codes. */
static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)
2685
/* Translate FBfcc/FBPfcc: branch on floating-point condition codes. */
static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    /* FP branches trap if the FPU is disabled or an exception is pending. */
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)
2699
/* Translate BPr (v9): branch on the contents of integer register rs1. */
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    /* gen_compare_reg rejects the reserved rcond encodings. */
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}
2712
/* Translate CALL: save pc in %o7 (r15), then jump via the delay slot. */
static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}
2722
static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}
2736
/* Translate SETHI: rd = imm22 << 10. */
static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop.  */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
2745
2746 /*
2747 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2748 */
2749
/*
 * Translate Tcc: conditional software trap.  The trap number is
 * rs1 + (imm or rs2), masked, plus the TT_TRAP base.
 */
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    /* Hypervisor-capable cpus in supervisor mode use the wider mask. */
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never.  */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        /* Compute rs1 + (imm or rs2) at runtime, then mask and bias. */
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    finishing_insn(dc);

    /* Trap always.  */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap.  */
    flush_cond(dc);
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
2802
trans_Tcc_r(DisasContext * dc,arg_Tcc_r * a)2803 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2804 {
2805 if (avail_32(dc) && a->cc) {
2806 return false;
2807 }
2808 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2809 }
2810
trans_Tcc_i_v7(DisasContext * dc,arg_Tcc_i_v7 * a)2811 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2812 {
2813 if (avail_64(dc)) {
2814 return false;
2815 }
2816 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2817 }
2818
trans_Tcc_i_v9(DisasContext * dc,arg_Tcc_i_v9 * a)2819 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2820 {
2821 if (avail_32(dc)) {
2822 return false;
2823 }
2824 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2825 }
2826
/* Translate STBAR: store-store memory barrier. */
static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}
2832
/* Translate MEMBAR (v9): memory barrier with mmask/cmask fields. */
static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches.  */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts.  */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}
2848
/*
 * Common helper for reading special registers (RD%asr, RDPR, RDHPR):
 * raise a privilege trap unless PRIV, else store FUNC's result in RD.
 * FUNC may fill the supplied destination or return another TCGv.
 */
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}
2858
/* Read %y: return the global directly instead of copying into dst. */
static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}
2876
/* Read Leon3 %asr17 (configuration register) via helper. */
static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

/* Read %pic: the performance instrumentation counter reads as zero. */
static TCGv do_rdpic(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(0);
}

TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
2892
/* Read %ccr via helper (it is computed from the lazy cc state). */
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

/* Read %asi: the value is known at translation time. */
static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

/* Read %tick via the tick helper; may perform I/O, so end the TB. */
static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

/* Read %pc: known at translation time. */
static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2934
/* Read %fprs from the dedicated 32-bit global. */
static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

/* Read %gsr; traps first if the FPU is disabled. */
static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

/* Read %softint from env (sign-extended 32-bit field). */
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

/* Read %tick_cmpr from env. */
static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

/* Read %stick via the tick helper; may perform I/O, so end the TB. */
static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

/* Read %stick_cmpr from env. */
static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
2992
2993 /*
2994 * UltraSPARC-T1 Strand status.
2995 * HYPV check maybe not enough, UA2005 & UA2007 describe
2996 * this ASR as impl. dep
2997 */
do_rdstrand_status(DisasContext * dc,TCGv dst)2998 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
2999 {
3000 return tcg_constant_tl(1);
3001 }
3002
3003 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3004
do_rdpsr(DisasContext * dc,TCGv dst)3005 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3006 {
3007 gen_helper_rdpsr(dst, tcg_env);
3008 return dst;
3009 }
3010
3011 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3012
/* Read %hpstate from env. */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/* Read %htstate for the current trap level: env->htstate[tl]. */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index the htstate array by (tl & MAXTL_MASK) * 8 bytes. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* Read %hintp from env. */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* Read %htba from env. */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* Read %hver from env. */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* Read %hstick_cmpr from env. */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
3070
/* Read %wim (sparc32 window invalid mask) from env. */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3078
/* Read TPC from the trap_state entry for the current trap level. */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3093
/* Read TNPC from the trap_state entry for the current trap level. */
static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3108
/* Read TSTATE from the trap_state entry for the current trap level. */
static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3123
/* Read TT (trap type) from the trap_state entry for the current TL. */
static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    /* tt is a 32-bit field; sign-extend into the target-long dst. */
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3139
/*
 * Read the trap base address.  cpu_tbr is already a live global TCGv,
 * so return it directly instead of copying into dst.
 */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3147
/* Read PSTATE (32-bit env field, sign-extended). */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3155
/* Read TL, the current trap level (32-bit env field, sign-extended). */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3163
/* Read PIL, the processor interrupt level (stored in env->psrpil). */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3171
/* Read CWP, the current window pointer, via helper. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3179
/* Read CANSAVE (register-window bookkeeping, 32-bit env field). */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3187
/* Read CANRESTORE (register-window bookkeeping, 32-bit env field). */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)
3196
/* Read CLEANWIN (register-window bookkeeping, 32-bit env field). */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3204
/* Read OTHERWIN (register-window bookkeeping, 32-bit env field). */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3212
/* Read WSTATE (window state, 32-bit env field). */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3220
/* Read GL, the current global register level (32-bit env field). */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3228
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3237
/* Read the VER (version) register from env. */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3245
/* FLUSHW: flush all register windows to the stack (sparc64 only). */
static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}
3254
/*
 * Common decode for WRASR/WRPR/WRHPR-style instructions.
 * Per the SPARC WR* definition the written value is rs1 ^ (rs2 or imm);
 * compute that and hand it to the per-register @func.
 * Returns false to fall back to an illegal-instruction trap on a bad
 * encoding, true once the insn is fully translated.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* %g0 ^ x == x: the operand is just the constant. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* x ^ 0 == x: no xor needed. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
3286
/* WRY: the Y register holds only 32 bits; zero-extend on write. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3293
/* WRCCR: write the condition-code register via helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3300
/* WRASI: only the low 8 bits are significant. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3312
/*
 * WRFPRS: write the FP register state.  Invalidate the cached
 * fprs_dirty tracking and end the TB so the new state is observed.
 */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3325
do_priv_nop(DisasContext * dc,bool priv)3326 static bool do_priv_nop(DisasContext *dc, bool priv)
3327 {
3328 if (!priv) {
3329 return raise_priv(dc);
3330 }
3331 return advance_pc(dc);
3332 }
3333
TRANS(WRPCR,HYPV,do_priv_nop,supervisor (dc))3334 TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
3335 TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
3336
/* WRGSR: requires the FPU to be enabled; then copy into cpu_gsr. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3344
/* WRSOFTINT_SET: set bits in the softint register via helper. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3351
/* WRSOFTINT_CLR: clear bits in the softint register via helper. */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3358
/* WRSOFTINT: overwrite the whole softint register via helper. */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3365
/* WRPR %tick_cmpr: store the value and reprogram the tick timer limit. */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3379
do_wrstick(DisasContext * dc,TCGv src)3380 static void do_wrstick(DisasContext *dc, TCGv src)
3381 {
3382 #ifdef TARGET_SPARC64
3383 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3384
3385 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3386 translator_io_start(&dc->base);
3387 gen_helper_tick_set_count(r_tickptr, src);
3388 /* End TB to handle timer interrupt */
3389 dc->base.is_jmp = DISAS_EXIT;
3390 #else
3391 qemu_build_not_reached();
3392 #endif
3393 }
3394
3395 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3396
/* WRSTICK_CMPR: store the value and reprogram the stick timer limit. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3410
/*
 * WRPOWERDOWN: halt the cpu until an interrupt arrives.  The helper
 * does not return normally, so commit pc/npc state first.
 */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3419
static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3430
/* WRPSR (sparc32): helper handles the write; end TB since it may
   change CWP, interrupt enables, etc. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3438
/* WRWIM: only bits for implemented windows are writable. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3449
/* WRPR %tpc: store into the trap_state entry for the current TL. */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3463
/* WRPR %tnpc: store into the trap_state entry for the current TL. */
static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3477
/* WRPR %tstate: store into the trap_state entry for the current TL. */
static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3491
/* WRPR %tt: store (low 32 bits) into trap_state.tt at the current TL. */
static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3505
/* WRPR %tick: set the tick counter via helper. */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3518
/* WRPR %tba / WRTBR: cpu_tbr is a live global; just copy. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3525
/*
 * WRPR %pstate: the helper may change interrupt enables, so the pc/npc
 * must be committed first and translation continues dynamically.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3537
/* WRPR %tl: commit state first; the trap level affects later decode. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3546
/* WRPR %pil: the helper may unmask a pending interrupt. */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3556
/* WRPR %cwp: change the current window pointer via helper. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3563
/* WRPR %cansave: plain 32-bit env store. */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3570
/* WRPR %canrestore: plain 32-bit env store. */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3577
/* WRPR %cleanwin: plain 32-bit env store. */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3584
/* WRPR %otherwin: plain 32-bit env store. */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3591
/* WRPR %wstate: plain 32-bit env store. */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3598
/* WRPR %gl: change the global register level via helper. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3605
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 WRTBR shares the implementation of WRPR %tba above. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3615
/* WRHPR %hpstate: store, then end TB so the new state is observed. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3623
/* WRHPR %htstate: store into htstate[TL], mirroring do_rdhtstate. */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* tp = env + (env->tl & MAXTL_MASK) * 8 -- each entry is 8 bytes. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)
3639
/* WRHPR %hintp: plain env store. */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)
3646
/* WRHPR %htba: plain env store. */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)
3653
/* WRHPR %hstick_cmpr: store and reprogram the hstick timer limit. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
3668
/*
 * SAVED/RESTORED: adjust the window bookkeeping registers via helper.
 * Both helpers are build-gated macros on sparc32 (see file head), so
 * the if/else must stay as direct calls.
 */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
3684
/* NOP: nothing to emit; just advance pc/npc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
3696
/*
 * Common translation for 3-operand arithmetic/logical insns.
 * @func  operates on two registers; @funci, if non-NULL, is the
 * immediate form.  When @logic_cc is set, the result is computed
 * directly into cpu_cc_N and the remaining flags are derived from it
 * (Z mirrors the result, C and V are cleared).
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute into cpu_cc_N so the flags update falls out below. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            /* Keep the 32-bit icc copies in sync as well. */
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3739
do_arith(DisasContext * dc,arg_r_r_ri_cc * a,void (* func)(TCGv,TCGv,TCGv),void (* funci)(TCGv,TCGv,target_long),void (* func_cc)(TCGv,TCGv,TCGv))3740 static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
3741 void (*func)(TCGv, TCGv, TCGv),
3742 void (*funci)(TCGv, TCGv, target_long),
3743 void (*func_cc)(TCGv, TCGv, TCGv))
3744 {
3745 if (a->cc) {
3746 return do_arith_int(dc, a, func_cc, NULL, false);
3747 }
3748 return do_arith_int(dc, a, func, funci, false);
3749 }
3750
/*
 * Logical wrapper: the same op is used with or without CC; the flags
 * fall out of the result itself (logic_cc path in do_arith_int).
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
3784
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov imm (or mov %g0): store the constant directly. */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov %reg. */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
3801
/*
 * UDIV: unsigned 64/32 division of (Y:rs1) by rs2, with the quotient
 * saturated to UINT32_MAX on overflow.  Division by zero traps; for a
 * register divisor this must be a delayed exception tested at runtime.
 */
static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Immediate (or %g0) divisor of zero: trap at translate time. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        /* Runtime zero check on the low 32 bits of the divisor. */
        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* Build the 64-bit dividend: Y in the high half, rs1 in the low. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    /* Saturate an oversized quotient to UINT32_MAX. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3854
/*
 * UDIVX: full-width unsigned division.  Division by zero traps;
 * a register divisor needs a runtime check via delayed exception.
 */
static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Immediate (or %g0) divisor of zero: trap at translate time. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3892
/*
 * SDIVX: full-width signed division.  Division by zero traps, and the
 * INT64_MIN / -1 case must be special-cased because tcg_gen_div on some
 * hosts (e.g. x86) would fault.
 */
static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        /* Immediate (or %g0) divisor of zero: trap at translate time. */
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            /* x / -1 == -x; also covers INT64_MIN / -1 == INT64_MIN. */
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
3949
/*
 * VIS EDGE* instructions: compute a partial-store byte mask for the
 * left (rs1) and right (rs2) edges of an unaligned block operation.
 * @width is the element size in bits, @cc selects the flag-setting
 * variant (flags from rs1 - rs2), @little_endian selects the *L forms.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    /*
     * l/r: the element index of each address within an 8-byte line
     * (r inverted); m: the all-elements mask for this width.
     */
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
4028
/* Common translation for unary reg -> reg operations. */
static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)
4041
/* Common translation for binary reg, reg -> reg operations. */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)
4065
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * recorded in GSR.align for use by subsequent FALIGNDATA.
 * sparc64-only; the decoder never reaches this on sparc32.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Deposit the sum's low 3 bits into GSR.align (bits [2:0]). */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}
4078
/*
 * ALIGNADDRESS_LITTLE: like gen_op_alignaddr, but GSR.align receives
 * the negated low bits of the sum (little-endian alignment).
 */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Negate before depositing: GSR.align = (-(s1 + s2)) & 7. */
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)
4095
/*
 * BMASK: dst = s1 + s2, and the sum is also deposited into the upper
 * 32 bits of GSR (the mask field used by BSHUFFLE).  sparc64-only.
 */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
4107
4108 static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
4109 {
4110 func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
4111 return true;
4112 }
4113
4114 TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
4115 TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
4116 TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)
4117
/*
 * Shift by register: SLL/SRL/SRA and their 64-bit "X" forms.
 * @l selects left shift; otherwise @u chooses logical (unsigned)
 * versus arithmetic right shift.  a->x selects the 64-bit form.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* The count uses only the low 5 (32-bit) or 6 (64-bit) bits. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* Truncate the 32-bit result after shifting. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so no high-half bits shift in. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first so the 32-bit sign bit replicates. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
4157
/*
 * Shift by immediate: SLL/SRL/SRA with a constant count.
 * Same @l/@u selection as do_shift_r.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-register-width shift. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit register: fold the shift and the
         * 32-bit zero/sign extension into one deposit/extract op.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4194
4195 static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
4196 {
4197 /* For simplicity, we under-decoded the rs2 form. */
4198 if (!imm && rs2_or_imm & ~0x1f) {
4199 return NULL;
4200 }
4201 if (imm || rs2_or_imm == 0) {
4202 return tcg_constant_tl(rs2_or_imm);
4203 } else {
4204 return cpu_regs[rs2_or_imm];
4205 }
4206 }
4207
/*
 * Common tail of MOVcc/MOVfcc/MOVr: conditionally replace %rd with
 * @src2 according to @cmp, leaving %rd unchanged otherwise.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    /* Load the old value of rd so the "false" arm preserves it. */
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4217
trans_MOVcc(DisasContext * dc,arg_MOVcc * a)4218 static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
4219 {
4220 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4221 DisasCompare cmp;
4222
4223 if (src2 == NULL) {
4224 return false;
4225 }
4226 gen_compare(&cmp, a->cc, a->cond, dc);
4227 return do_mov_cond(dc, &cmp, a->rd, src2);
4228 }
4229
trans_MOVfcc(DisasContext * dc,arg_MOVfcc * a)4230 static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
4231 {
4232 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4233 DisasCompare cmp;
4234
4235 if (src2 == NULL) {
4236 return false;
4237 }
4238 gen_fcompare(&cmp, a->cc, a->cond);
4239 return do_mov_cond(dc, &cmp, a->rd, src2);
4240 }
4241
trans_MOVR(DisasContext * dc,arg_MOVR * a)4242 static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
4243 {
4244 TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
4245 DisasCompare cmp;
4246
4247 if (src2 == NULL) {
4248 return false;
4249 }
4250 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
4251 return false;
4252 }
4253 return do_mov_cond(dc, &cmp, a->rd, src2);
4254 }
4255
/*
 * Compute rs1 + (rs2 or simm) into a fresh temporary and hand it to
 * @func.  Used by JMPL/RETT/RETURN/SAVE/RESTORE, which must observe
 * the sum computed in the *old* register window.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
4280
/*
 * JMPL: jump to @src, writing the address of this instruction to %rd.
 * Traps on a misaligned target.
 */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    /* Indirect branch: look up the next TB dynamically. */
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)
4301
/*
 * RETT (sparc32 only): return from trap to @src.  Privileged; the
 * helper performs the window restore and PSR update.
 */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
4319
/*
 * RETURN (sparc64 only): restore the register window and jump to @src.
 * Traps on a misaligned target.
 */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    /* Restore before the jump; src was captured in do_add_special. */
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)
4334
/*
 * SAVE: advance to a new register window, then write the sum (computed
 * in the old window by do_add_special) to %rd in the new window.
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)
4343
/*
 * RESTORE: return to the previous register window, then write the sum
 * (computed in the old window) to %rd in the restored window.
 */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
4352
/*
 * DONE/RETRY (sparc64): return from a trap via TPC/TNPC.  Privileged.
 * The pc/npc become dynamic since the helper rewrites them.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    /* The helpers may touch timers/interrupt state. */
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
4371
4372 /*
4373 * Major opcode 11 -- load and store instructions
4374 */
4375
/*
 * Compute the effective address rs1 + (rs2 or simm) for a load/store.
 * Applies the 32-bit address mask when PSTATE.AM is set (AM_CHECK).
 * Returns NULL for an invalid rs2 encoding.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        /* Reuse the sum temp if we made one; never clobber a gpr. */
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
4404
/*
 * Integer loads (LDUB/LDUH/LDUW/LDSB/LDSH/LDSW/LDX), including the
 * alternate-space forms resolved through the ASI machinery.
 */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
4428
/*
 * Integer stores (STB/STH/STW/STX), including alternate-space forms.
 */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4448
/*
 * LDD: load doubleword into an even/odd register pair.
 * An odd rd is an illegal encoding.
 */
static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4465
/*
 * STD: store doubleword from an even/odd register pair.
 * An odd rd is an illegal encoding.
 */
static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}
4482
/*
 * LDSTUB: atomically load a byte into %rd and store 0xff to memory.
 */
static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}
4499
/*
 * SWAP: atomically exchange %rd with a 32-bit memory word.
 */
static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    /* rd is both the value stored and the destination of the load. */
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
4517
/*
 * CASA/CASXA: compare-and-swap.  Memory at [rs1] is compared with
 * %rs2; on match, %rd is stored; %rd always receives the old value.
 */
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    /* CAS uses %rs1 alone as the address: no offset (imm=true, 0). */
    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);       /* old value out */
    n = gen_load_gpr(dc, a->rd);       /* new value in */
    c = gen_load_gpr(dc, a->rs2_or_imm); /* comparand */
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)
4539
/*
 * Floating-point loads of size @sz (32/64/128 bits), including
 * alternate-space forms.  Quad loads additionally require float128
 * support on the cpu.
 */
static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Loads must also trap while an fp exception is pending. */
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)
4567
/*
 * Floating-point stores of size @sz, including alternate-space forms.
 */
static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)
4595
/*
 * STDFQ (sparc32 only, privileged): store the front entry of the
 * floating-point deferred-trap queue and mark the queue empty.
 * Traps with FSR.ftt = sequence_error when the queue is empty.
 */
static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    TCGv addr;

    if (!avail_32(dc)) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }

    /* Store the single element from the queue. */
    TCGv_i64 fq = tcg_temp_new_i64();
    tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
    tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);

    /* Mark the queue empty, transitioning to fp_execute state. */
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_qne));
    dc->fsr_qne = 0;

    return advance_pc(dc);
#else
    qemu_build_not_reached();
#endif
}
4634
/*
 * LDFSR: load the 32-bit FSR from memory.  FCC0 is kept in its own
 * TCG global; everything else goes through the nofcc/noftt helper.
 */
static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}
4656
/*
 * LDXFSR/LDXEFSR (sparc64): load the 64-bit FSR.  All four FCC fields
 * are unpacked into their TCG globals; @entire selects whether FTT is
 * also loaded (LDXEFSR) or preserved (LDXFSR).
 */
static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    /*
     * cpu_fcc[3] doubles as scratch for the high word: it is written
     * by the extr below, then its final value extracted from itself.
     */
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)
4695
/*
 * STFSR/STXFSR: assemble the architectural FSR via helper and store
 * it as a 32-bit (v8) or 64-bit (v9) word.
 */
static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)
4717
/*
 * Write the 32-bit constant @c to float register %rd
 * (FZEROs: all zeros, FONEs: all ones).
 */
static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)
4729
/*
 * Write the 64-bit constant @c to double float register %rd
 * (FZEROd: all zeros, FONEd: all ones).
 */
static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)
4741
/*
 * Single-precision unary ops that cannot raise IEEE exceptions:
 * rd = func(rs), 32-bit in and out.
 */
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)
4762
/*
 * VIS pack ops: 64-bit double-reg source to 32-bit single-reg result.
 */
static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)
4782
/*
 * Single-precision unary ops that may raise IEEE exceptions and so
 * need tcg_env: rd = func(env, rs).
 */
static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)
4801
/*
 * Conversions from a 64-bit double-reg source to a 32-bit result,
 * via an env-taking helper.
 */
static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)
4822
/*
 * Double-precision unary ops without env: rd = func(rs), 64-bit.
 */
static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)
4844
/*
 * Double-precision unary ops via an env-taking helper:
 * rd = func(env, rs), 64-bit in and out.
 */
static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)
4864
/*
 * 32-bit single-reg source to 64-bit double-reg result, no env.
 */
static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)
4883
/*
 * Conversions from a 32-bit single-reg source to a 64-bit double-reg
 * result, via an env-taking helper.
 */
static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)
4904
/*
 * Quad-precision unary ops without env (move/negate/abs).
 * These cannot raise IEEE exceptions, but FTT is still cleared.
 */
static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)
4927
/*
 * Quad-precision unary ops via an env-taking helper.
 */
static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)
4947
/*
 * Conversions from a quad source to a 32-bit result, via env helper.
 */
static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)
4970
/*
 * Conversions from a quad source to a 64-bit result, via env helper.
 */
static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)
4993
/*
 * Conversions from a 32-bit source to a quad result, via env helper.
 */
static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)
5016
5017 static bool do_env_qd(DisasContext *dc, arg_r_r *a,
5018 void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
5019 {
5020 TCGv_i64 src;
5021 TCGv_i128 dst;
5022
5023 if (gen_trap_if_nofpu_fpexception(dc)) {
5024 return true;
5025 }
5026
5027 src = gen_load_fpr_D(dc, a->rs);
5028 dst = tcg_temp_new_i128();
5029 func(dst, tcg_env, src);
5030 gen_store_fpr_Q(dc, a->rd, dst);
5031 return advance_pc(dc);
5032 }
5033
TRANS(FdTOq,ALL,do_env_qd,a,gen_helper_fdtoq)5034 TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
5035 TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)
5036
/*
 * 32-bit binary ops without env (VIS partitioned add/sub, logicals):
 * rd = func(rs1, rs2).
 */
static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
5074
/*
 * Single-precision binary ops via an env-taking helper:
 * rd = func(env, rs1, rs2).
 */
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
5097
/*
 * VIS ops taking two 32-bit sources and producing a 64-bit result.
 */
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
5121
/*
 * VIS ops taking a 32-bit and a 64-bit source, producing 64 bits.
 */
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
5141
5142 static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
5143 void (*func)(unsigned, uint32_t, uint32_t,
5144 uint32_t, uint32_t, uint32_t))
5145 {
5146 if (gen_trap_ifnofpu(dc)) {
5147 return true;
5148 }
5149
5150 func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
5151 gen_offset_fpr_D(a->rs2), 8, 8);
5152 return advance_pc(dc);
5153 }
5154
/* Partitioned add/subtract. */
TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

/* Saturating add/subtract, signed and unsigned. */
TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

/* Per-element variable shifts. */
TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

/* Partitioned min/max, signed and unsigned. */
TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
5198
5199 static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
5200 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
5201 {
5202 TCGv_i64 dst, src1, src2;
5203
5204 if (gen_trap_ifnofpu(dc)) {
5205 return true;
5206 }
5207
5208 dst = tcg_temp_new_i64();
5209 src1 = gen_load_fpr_D(dc, a->rs1);
5210 src2 = gen_load_fpr_D(dc, a->rs2);
5211 func(dst, src1, src2);
5212 gen_store_fpr_D(dc, a->rd, dst);
5213 return advance_pc(dc);
5214 }
5215
/* 64-bit register-pair ops handled inline on i64 temporaries. */
TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

/* VIS logical ops map directly onto TCG i64 logicals. */
TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
5240
5241 static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
5242 void (*func)(TCGv, TCGv_i64, TCGv_i64))
5243 {
5244 TCGv_i64 src1, src2;
5245 TCGv dst;
5246
5247 if (gen_trap_ifnofpu(dc)) {
5248 return true;
5249 }
5250
5251 dst = gen_dest_gpr(dc, a->rd);
5252 src1 = gen_load_fpr_D(dc, a->rs1);
5253 src2 = gen_load_fpr_D(dc, a->rs2);
5254 func(dst, src1, src2);
5255 gen_store_gpr(dc, a->rd, dst);
5256 return advance_pc(dc);
5257 }
5258
/* Partitioned compares producing a bitmask in an integer register. */
TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
/* XMULX/XMULXHI operate on integer registers, hence do_rrr. */
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
5283
5284 static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
5285 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
5286 {
5287 TCGv_i64 dst, src1, src2;
5288
5289 if (gen_trap_if_nofpu_fpexception(dc)) {
5290 return true;
5291 }
5292
5293 dst = tcg_temp_new_i64();
5294 src1 = gen_load_fpr_D(dc, a->rs1);
5295 src2 = gen_load_fpr_D(dc, a->rs2);
5296 func(dst, tcg_env, src1, src2);
5297 gen_store_fpr_D(dc, a->rd, dst);
5298 return advance_pc(dc);
5299 }
5300
/* Double-precision FP arithmetic through the env helpers. */
TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
5307
/*
 * FsMULd: multiply two single-precision sources into a double-precision
 * result.  The FPU-disabled/pending-exception trap is checked before the
 * feature bit so that the unimplemented-FPop trap is only raised for an
 * otherwise-runnable FPU op.
 */
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        /* Not all CPU models implement FSMULD; raise fp_exception. */
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5327
trans_FNsMULd(DisasContext * dc,arg_r_r_r * a)5328 static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
5329 {
5330 TCGv_i64 dst;
5331 TCGv_i32 src1, src2;
5332
5333 if (!avail_VIS3(dc)) {
5334 return false;
5335 }
5336 if (gen_trap_ifnofpu(dc)) {
5337 return true;
5338 }
5339 dst = tcg_temp_new_i64();
5340 src1 = gen_load_fpr_F(dc, a->rs1);
5341 src2 = gen_load_fpr_F(dc, a->rs2);
5342 gen_helper_fnsmuld(dst, tcg_env, src1, src2);
5343 gen_store_fpr_D(dc, a->rd, dst);
5344 return advance_pc(dc);
5345 }
5346
do_ffff(DisasContext * dc,arg_r_r_r_r * a,void (* func)(TCGv_i32,TCGv_i32,TCGv_i32,TCGv_i32))5347 static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
5348 void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
5349 {
5350 TCGv_i32 dst, src1, src2, src3;
5351
5352 if (gen_trap_ifnofpu(dc)) {
5353 return true;
5354 }
5355
5356 src1 = gen_load_fpr_F(dc, a->rs1);
5357 src2 = gen_load_fpr_F(dc, a->rs2);
5358 src3 = gen_load_fpr_F(dc, a->rs3);
5359 dst = tcg_temp_new_i32();
5360 func(dst, src1, src2, src3);
5361 gen_store_fpr_F(dc, a->rd, dst);
5362 return advance_pc(dc);
5363 }
5364
/* Single-precision fused multiply-add family (FMAF). */
TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
5369
5370 static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
5371 void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
5372 {
5373 TCGv_i64 dst, src1, src2, src3;
5374
5375 if (gen_trap_ifnofpu(dc)) {
5376 return true;
5377 }
5378
5379 dst = tcg_temp_new_i64();
5380 src1 = gen_load_fpr_D(dc, a->rs1);
5381 src2 = gen_load_fpr_D(dc, a->rs2);
5382 src3 = gen_load_fpr_D(dc, a->rs3);
5383 func(dst, src1, src2, src3);
5384 gen_store_fpr_D(dc, a->rd, dst);
5385 return advance_pc(dc);
5386 }
5387
/* PDIST accumulates into rd, so rd is also passed as the third source. */
TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
5395
/*
 * FALIGNDATAi (VIS4): align data using a shift amount taken from an
 * integer register (rs1) rather than GSR.align.  Note that rd is read
 * as the first data source, so this is a read-modify-write of rd.
 */
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);   /* rd doubles as a source */
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);    /* alignment comes from a GPR */
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}
5416
/*
 * Translate a quad-precision FP arithmetic op through an env helper:
 * rd.q = func(env, rs1.q, rs2.q).  Traps if the FPU is disabled, an FP
 * exception is pending, or float128 is not available.
 */
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    /* src1 is reused as the destination temporary for the result. */
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}
5435
/* Quad-precision FP arithmetic through the env helpers. */
TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
5440
5441 static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
5442 {
5443 TCGv_i64 src1, src2;
5444 TCGv_i128 dst;
5445
5446 if (gen_trap_if_nofpu_fpexception(dc)) {
5447 return true;
5448 }
5449 if (gen_trap_float128(dc)) {
5450 return true;
5451 }
5452
5453 src1 = gen_load_fpr_D(dc, a->rs1);
5454 src2 = gen_load_fpr_D(dc, a->rs2);
5455 dst = tcg_temp_new_i128();
5456 gen_helper_fdmulq(dst, tcg_env, src1, src2);
5457 gen_store_fpr_Q(dc, a->rd, dst);
5458 return advance_pc(dc);
5459 }
5460
/*
 * Translate FMOVR*: conditionally move a float register based on the
 * value of an integer register.  gen_compare_reg is evaluated first
 * because it returns false (illegal insn) for a reserved condition
 * encoding, which must take precedence over the FPU trap checks.
 */
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5480
/* 64-bit only: conditional FP moves on integer register contents. */
TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
5484
/*
 * Translate FMOV*cc: conditionally move a float register based on the
 * integer condition codes (icc/xcc selected by a->cc).
 */
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc) || (is_128 && gen_trap_float128(dc))) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5502
/* 64-bit only: conditional FP moves on integer condition codes. */
TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
5506
/*
 * Translate FMOV*fcc: conditionally move a float register based on the
 * floating-point condition codes (fcc field selected by a->cc).
 */
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc) || (is_128 && gen_trap_float128(dc))) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}
5524
/* 64-bit only: conditional FP moves on FP condition codes. */
TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
5528
/*
 * Translate FCMPs/FCMPEs into cpu_fcc[a->cc].  The "e" variant signals
 * on quiet NaNs as well.  On sparc32 only %fcc0 exists.
 */
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    void (*cmp)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32);
    TCGv_i32 lhs, rhs;

    if (avail_32(dc) && a->cc != 0) {
        return false;   /* sparc32 has only fcc0 */
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    cmp = e ? gen_helper_fcmpes : gen_helper_fcmps;
    lhs = gen_load_fpr_F(dc, a->rs1);
    rhs = gen_load_fpr_F(dc, a->rs2);
    cmp(cpu_fcc[a->cc], tcg_env, lhs, rhs);
    return advance_pc(dc);
}
5549
TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
5552
5553 static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
5554 {
5555 TCGv_i64 src1, src2;
5556
5557 if (avail_32(dc) && a->cc != 0) {
5558 return false;
5559 }
5560 if (gen_trap_if_nofpu_fpexception(dc)) {
5561 return true;
5562 }
5563
5564 src1 = gen_load_fpr_D(dc, a->rs1);
5565 src2 = gen_load_fpr_D(dc, a->rs2);
5566 if (e) {
5567 gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
5568 } else {
5569 gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5570 }
5571 return advance_pc(dc);
5572 }
5573
TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
5576
5577 static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
5578 {
5579 TCGv_i128 src1, src2;
5580
5581 if (avail_32(dc) && a->cc != 0) {
5582 return false;
5583 }
5584 if (gen_trap_if_nofpu_fpexception(dc)) {
5585 return true;
5586 }
5587 if (gen_trap_float128(dc)) {
5588 return true;
5589 }
5590
5591 src1 = gen_load_fpr_Q(dc, a->rs1);
5592 src2 = gen_load_fpr_Q(dc, a->rs2);
5593 if (e) {
5594 gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
5595 } else {
5596 gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
5597 }
5598 return advance_pc(dc);
5599 }
5600
TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
5603
5604 static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
5605 {
5606 TCGv_i32 src1, src2;
5607
5608 if (!avail_VIS3(dc)) {
5609 return false;
5610 }
5611 if (gen_trap_ifnofpu(dc)) {
5612 return true;
5613 }
5614
5615 src1 = gen_load_fpr_F(dc, a->rs1);
5616 src2 = gen_load_fpr_F(dc, a->rs2);
5617 gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
5618 return advance_pc(dc);
5619 }
5620
trans_FLCMPd(DisasContext * dc,arg_FLCMPd * a)5621 static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
5622 {
5623 TCGv_i64 src1, src2;
5624
5625 if (!avail_VIS3(dc)) {
5626 return false;
5627 }
5628 if (gen_trap_ifnofpu(dc)) {
5629 return true;
5630 }
5631
5632 src1 = gen_load_fpr_D(dc, a->rs1);
5633 src2 = gen_load_fpr_D(dc, a->rs2);
5634 gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
5635 return advance_pc(dc);
5636 }
5637
do_movf2r(DisasContext * dc,arg_r_r * a,int (* offset)(unsigned int),void (* load)(TCGv,TCGv_ptr,tcg_target_long))5638 static bool do_movf2r(DisasContext *dc, arg_r_r *a,
5639 int (*offset)(unsigned int),
5640 void (*load)(TCGv, TCGv_ptr, tcg_target_long))
5641 {
5642 TCGv dst;
5643
5644 if (gen_trap_ifnofpu(dc)) {
5645 return true;
5646 }
5647 dst = gen_dest_gpr(dc, a->rd);
5648 load(dst, tcg_env, offset(a->rs));
5649 gen_store_gpr(dc, a->rd, dst);
5650 return advance_pc(dc);
5651 }
5652
/* Float-register to GPR moves (sign/zero-extending for 32-bit). */
TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
5656
5657 static bool do_movr2f(DisasContext *dc, arg_r_r *a,
5658 int (*offset)(unsigned int),
5659 void (*store)(TCGv, TCGv_ptr, tcg_target_long))
5660 {
5661 TCGv src;
5662
5663 if (gen_trap_ifnofpu(dc)) {
5664 return true;
5665 }
5666 src = gen_load_gpr(dc, a->rs);
5667 store(src, tcg_env, offset(a->rd));
5668 return advance_pc(dc);
5669 }
5670
/* GPR to float-register moves. */
TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
5673
/*
 * Set up per-TB translation state from the TB flags captured at
 * code-generation time.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    /* SPARC carries the delay-slot npc in the TB's cs_base field. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
5704
/* No per-TB prologue is needed for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
5708
/*
 * Record pc/npc for this insn.  When npc is one of the dynamic tag
 * values (low two bits set), canonicalize it before emitting the
 * insn_start op so restore_state_to_opc sees a consistent encoding.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* A conditional branch is pending; both targets are known. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
5730
/*
 * Fetch and translate one 4-byte instruction.  An undecodable word
 * raises TT_ILL_INSN.  If the insn changed dc->pc away from the
 * straight-line path (a branch), end the TB.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* A taken branch updated dc->pc; stop translating this TB. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
5750
/*
 * Emit the TB epilogue: route control to the next TB according to how
 * translation ended, then emit the out-of-line code for any exceptions
 * deferred from delay slots.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is dynamic (tagged in the low bits).
         * Materialize any static value into the CPU state and decide
         * whether a TB-lookup goto_ptr is still safe.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch into cpu_npc. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit deferred delay-slot exception paths and free the list. */
    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* npc low bits set means it was dynamic; leave cpu_npc alone. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
5832
/* Hooks used by the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};
5840
/* Entry point: translate one TB using the generic translator loop. */
void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
5848
/*
 * One-time TCG initialization: create the global TCG values that mirror
 * fields of CPUSPARCState (condition codes, pc/npc, window registers...).
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    /* 32-bit globals: FPRS (sparc64) and the FP condition codes. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    /* target_ulong-wide globals: condition-code pieces, pc/npc, etc. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 always reads as zero and is never a real TCG global. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers live behind regwptr, not directly in env. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}
5914