/*
   SPARC translation

   Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
   Copyright (C) 2003-2005 Fabrice Bellard

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/log.h"
#include "fpu/softfloat.h"
#include "asi.h"
#include "target/sparc/translate.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E) qemu_build_not_reached()
# define gen_helper_rdasr17(D, E) qemu_build_not_reached()
# define gen_helper_rett(E) qemu_build_not_reached()
# define gen_helper_power_down(E) qemu_build_not_reached()
# define gen_helper_wrpsr(E, S) qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S) qemu_build_not_reached()
# define gen_helper_done(E) qemu_build_not_reached()
# define gen_helper_flushw(E) qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2) qemu_build_not_reached()
# define gen_helper_rdccr(D, E) qemu_build_not_reached()
# define gen_helper_rdcwp(D, E) qemu_build_not_reached()
# define gen_helper_restored(E) qemu_build_not_reached()
# define gen_helper_retry(E) qemu_build_not_reached()
# define gen_helper_saved(E) qemu_build_not_reached()
# define gen_helper_set_softint(E, S) qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
# define gen_helper_wrccr(E, S) qemu_build_not_reached()
# define gen_helper_wrcwp(E, S) qemu_build_not_reached()
# define gen_helper_wrgl(E, S) qemu_build_not_reached()
# define gen_helper_write_softint(E, S) qemu_build_not_reached()
# define gen_helper_wrpil(E, S) qemu_build_not_reached()
# define gen_helper_wrpstate(E, S) qemu_build_not_reached()
# define gen_helper_cmask8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32 ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK 0
#endif
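
/*
 * The helpers above exist for only one of sparc32/sparc64.  The stubs
 * let decode paths shared between the two targets compile for both;
 * any use outside dead code fails at build time via
 * qemu_build_not_reached().
 */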

#define DISAS_EXIT  DISAS_TARGET_0

/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif

typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#else
    bool fsr_qne;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;

// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
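
// e.g. GET_FIELD_SP(insn, 0, 4) == GET_FIELD(insn, 27, 31): insn bits 4..0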

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1<<13))

static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;
    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */

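/*
 * The 32 single-precision registers are packed into the fpr[] array of
 * CPU_DoubleU: the even register of a pair occupies the more significant
 * half, so %f0 is fpr[0].l.upper and %f1 is fpr[0].l.lower.
 */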
static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}

static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}

/* moves */
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

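/* Whether 64-bit addresses must be masked to 32 bits (sparc64 PSTATE.AM). */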
#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif

static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}

static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}

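/*
 * On sparc64, cpu_icc_C holds src1 ^ src2 ^ result, whose bit 32 is the
 * carry/borrow out of the low 32 bits; extract it as a 0/1 value.
 * On sparc32, cpu_icc_C is already 0 or 1.
 */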
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}

static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_addcio_tl(cpu_cc_N, cpu_cc_C, src1, src2, cin);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
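    /*
     * Signed overflow: V = (N ^ src2) & ~(src1 ^ src2), i.e. the operands
     * had the same sign and the result's sign differs.  The src1 ^ src2
     * term is staged in Z and consumed below.
     */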
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
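    /*
     * sub2 leaves -1 in the high part on borrow; negate it to the 0/1
     * carry flag.  Signed overflow for subtraction:
     * V = (N ^ src1) & (src1 ^ src2).
     */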
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    // b1 = N ^ V;
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}

static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}

static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

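    /* Form the 128-bit product, add the zero-extended addend, and
       keep only the high 64 bits. */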
    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}

#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

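    /*
     * On overflow the computed result has the inverted sign, so r >= 0
     * identifies negative overflow.  t = (r >= 0) + INT32_MAX wraps to
     * INT32_MIN or stays INT32_MAX: the saturated value for each direction.
     */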
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

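    /* As in gen_op_fpadds32s: pick the saturated value by the sign of r. */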
    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}

static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}

static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

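    /*
     * Rounded average (a + b + 1) >> 1 without intermediate overflow:
     * (a >> 1) + (b >> 1) + ((a | b) & 1).
     */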
    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}

static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16 ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16 ({ qemu_build_not_reached(); NULL; })
#endif

static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

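/*
 * Resolve a deferred conditional branch: select between the two possible
 * npc values recorded in jump_pc[] using the condition saved in dc->jump.
 */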
static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}

static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}

static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}

static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}

static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, z);
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, z);
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c |
                                   float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, z, op);
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    TCGv_i32 z = tcg_constant_i32(0);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, z, op);
}

/* Use muladd to compute ((1 * src1) + src2) / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(0);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute ((1 * src1) - src2) / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_c);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

/* Use muladd to compute -(((1 * src1) + src2) / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 fone = tcg_constant_i32(float32_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmadds(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 fone = tcg_constant_i64(float64_one);
    TCGv_i32 mone = tcg_constant_i32(-1);
    TCGv_i32 op = tcg_constant_i32(float_muladd_negate_result);
    gen_helper_fmaddd(d, tcg_env, fone, s1, s2, mone, op);
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static bool gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_iffpexception(DisasContext *dc)
{
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    /*
     * There are 3 states for the sparc32 fpu:
     * Normally the fpu is in fp_execute, and all insns are allowed.
     * When an exception is signaled, it moves to fp_exception_pending state.
     * Upon seeing the next FPop, the fpu moves to fp_exception state,
     * populates the FQ, and generates an fp_exception trap.
     * The fpu remains in fp_exception state until FQ becomes empty
     * after execution of a STDFQ instruction.  While the fpu is in
     * fp_exception state, any FPop, fp load or fp branch insn will
     * return to fp_exception_pending state, set FSR.FTT to sequence_error,
     * and the insn will not be entered into the FQ.
     *
     * In QEMU, we do not model the fp_exception_pending state and
     * instead populate FQ and raise the exception immediately.
     * But we can still honor fp_exception state by noticing when
     * the FQ is not empty.
     */
    if (dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }
#endif
    return false;
}

static bool gen_trap_if_nofpu_fpexception(DisasContext *dc)
{
    return gen_trap_ifnofpu(dc) || gen_trap_iffpexception(dc);
}

/* asi moves */
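/*
 * Classify how an ASI access is implemented: as a direct memory access
 * (possibly with a twist such as twin loads, block transfers or short
 * floating-point accesses), as an inline special case, or via an
 * out-of-line helper.
 */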
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged. */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY:     /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL:     /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged. */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
1620 if (!supervisor(dc) && asi < 0x80) {
1621 gen_exception(dc, TT_PRIV_ACT);
1622 type = GET_ASI_EXCP;
1623 } else {
1624 switch (asi) {
1625 case ASI_REAL: /* Bypass */
1626 case ASI_REAL_IO: /* Bypass, non-cacheable */
1627 case ASI_REAL_L: /* Bypass LE */
1628 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1629 case ASI_TWINX_REAL: /* Real address, twinx */
1630 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1631 case ASI_QUAD_LDD_PHYS:
1632 case ASI_QUAD_LDD_PHYS_L:
1633 mem_idx = MMU_PHYS_IDX;
1634 break;
1635 case ASI_N: /* Nucleus */
1636 case ASI_NL: /* Nucleus LE */
1637 case ASI_TWINX_N:
1638 case ASI_TWINX_NL:
1639 case ASI_NUCLEUS_QUAD_LDD:
1640 case ASI_NUCLEUS_QUAD_LDD_L:
1641 if (hypervisor(dc)) {
1642 mem_idx = MMU_PHYS_IDX;
1643 } else {
1644 mem_idx = MMU_NUCLEUS_IDX;
1645 }
1646 break;
1647 case ASI_AIUP: /* As if user primary */
1648 case ASI_AIUPL: /* As if user primary LE */
1649 case ASI_TWINX_AIUP:
1650 case ASI_TWINX_AIUP_L:
1651 case ASI_BLK_AIUP_4V:
1652 case ASI_BLK_AIUP_L_4V:
1653 case ASI_BLK_AIUP:
1654 case ASI_BLK_AIUPL:
1655 case ASI_MON_AIUP:
1656 mem_idx = MMU_USER_IDX;
1657 break;
1658 case ASI_AIUS: /* As if user secondary */
1659 case ASI_AIUSL: /* As if user secondary LE */
1660 case ASI_TWINX_AIUS:
1661 case ASI_TWINX_AIUS_L:
1662 case ASI_BLK_AIUS_4V:
1663 case ASI_BLK_AIUS_L_4V:
1664 case ASI_BLK_AIUS:
1665 case ASI_BLK_AIUSL:
1666 case ASI_MON_AIUS:
1667 mem_idx = MMU_USER_SECONDARY_IDX;
1668 break;
1669 case ASI_S: /* Secondary */
1670 case ASI_SL: /* Secondary LE */
1671 case ASI_TWINX_S:
1672 case ASI_TWINX_SL:
1673 case ASI_BLK_COMMIT_S:
1674 case ASI_BLK_S:
1675 case ASI_BLK_SL:
1676 case ASI_FL8_S:
1677 case ASI_FL8_SL:
1678 case ASI_FL16_S:
1679 case ASI_FL16_SL:
1680 case ASI_MON_S:
1681 if (mem_idx == MMU_USER_IDX) {
1682 mem_idx = MMU_USER_SECONDARY_IDX;
1683 } else if (mem_idx == MMU_KERNEL_IDX) {
1684 mem_idx = MMU_KERNEL_SECONDARY_IDX;
1685 }
1686 break;
1687 case ASI_P: /* Primary */
1688 case ASI_PL: /* Primary LE */
1689 case ASI_TWINX_P:
1690 case ASI_TWINX_PL:
1691 case ASI_BLK_COMMIT_P:
1692 case ASI_BLK_P:
1693 case ASI_BLK_PL:
1694 case ASI_FL8_P:
1695 case ASI_FL8_PL:
1696 case ASI_FL16_P:
1697 case ASI_FL16_PL:
1698 case ASI_MON_P:
1699 break;
1700 }
1701 switch (asi) {
1702 case ASI_REAL:
1703 case ASI_REAL_IO:
1704 case ASI_REAL_L:
1705 case ASI_REAL_IO_L:
1706 case ASI_N:
1707 case ASI_NL:
1708 case ASI_AIUP:
1709 case ASI_AIUPL:
1710 case ASI_AIUS:
1711 case ASI_AIUSL:
1712 case ASI_S:
1713 case ASI_SL:
1714 case ASI_P:
1715 case ASI_PL:
1716 case ASI_MON_P:
1717 case ASI_MON_S:
1718 case ASI_MON_AIUP:
1719 case ASI_MON_AIUS:
1720 type = GET_ASI_DIRECT;
1721 break;
1722 case ASI_TWINX_REAL:
1723 case ASI_TWINX_REAL_L:
1724 case ASI_TWINX_N:
1725 case ASI_TWINX_NL:
1726 case ASI_TWINX_AIUP:
1727 case ASI_TWINX_AIUP_L:
1728 case ASI_TWINX_AIUS:
1729 case ASI_TWINX_AIUS_L:
1730 case ASI_TWINX_P:
1731 case ASI_TWINX_PL:
1732 case ASI_TWINX_S:
1733 case ASI_TWINX_SL:
1734 case ASI_QUAD_LDD_PHYS:
1735 case ASI_QUAD_LDD_PHYS_L:
1736 case ASI_NUCLEUS_QUAD_LDD:
1737 case ASI_NUCLEUS_QUAD_LDD_L:
1738 type = GET_ASI_DTWINX;
1739 break;
1740 case ASI_BLK_COMMIT_P:
1741 case ASI_BLK_COMMIT_S:
1742 case ASI_BLK_AIUP_4V:
1743 case ASI_BLK_AIUP_L_4V:
1744 case ASI_BLK_AIUP:
1745 case ASI_BLK_AIUPL:
1746 case ASI_BLK_AIUS_4V:
1747 case ASI_BLK_AIUS_L_4V:
1748 case ASI_BLK_AIUS:
1749 case ASI_BLK_AIUSL:
1750 case ASI_BLK_S:
1751 case ASI_BLK_SL:
1752 case ASI_BLK_P:
1753 case ASI_BLK_PL:
1754 type = GET_ASI_BLOCK;
1755 break;
1756 case ASI_FL8_S:
1757 case ASI_FL8_SL:
1758 case ASI_FL8_P:
1759 case ASI_FL8_PL:
1760 memop = MO_UB;
1761 type = GET_ASI_SHORT;
1762 break;
1763 case ASI_FL16_S:
1764 case ASI_FL16_SL:
1765 case ASI_FL16_P:
1766 case ASI_FL16_PL:
1767 memop = MO_TEUW;
1768 type = GET_ASI_SHORT;
1769 break;
1770 }
1771 /* The little-endian asis all have bit 3 set. */
1772 if (asi & 8) {
1773 memop ^= MO_BSWAP;
1774 }
1775 }
1776 #endif
1777
1778 done:
1779 return (DisasASI){ type, asi, mem_idx, memop };
1780 }
1781
1782 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
gen_helper_ld_asi(TCGv_i64 r,TCGv_env e,TCGv a,TCGv_i32 asi,TCGv_i32 mop)1783 static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
1784 TCGv_i32 asi, TCGv_i32 mop)
1785 {
1786 g_assert_not_reached();
1787 }
1788
1789 static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
1790 TCGv_i32 asi, TCGv_i32 mop)
1791 {
1792 g_assert_not_reached();
1793 }
1794 #endif
1795
1796 static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1797 {
1798 switch (da->type) {
1799 case GET_ASI_EXCP:
1800 break;
1801 case GET_ASI_DTWINX: /* Reserved for ldda. */
1802 gen_exception(dc, TT_ILL_INSN);
1803 break;
1804 case GET_ASI_DIRECT:
1805 tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
1806 break;
1807
1808 case GET_ASI_CODE:
1809 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1810 {
1811 MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
1812 TCGv_i64 t64 = tcg_temp_new_i64();
1813
1814 gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
1815 tcg_gen_trunc_i64_tl(dst, t64);
1816 }
1817 break;
1818 #else
1819 g_assert_not_reached();
1820 #endif
1821
1822 default:
1823 {
1824 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1825 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1826
1827 save_state(dc);
1828 #ifdef TARGET_SPARC64
1829 gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
1830 #else
1831 {
1832 TCGv_i64 t64 = tcg_temp_new_i64();
1833 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1834 tcg_gen_trunc_i64_tl(dst, t64);
1835 }
1836 #endif
1837 }
1838 break;
1839 }
1840 }
1841
1842 static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
1843 {
1844 switch (da->type) {
1845 case GET_ASI_EXCP:
1846 break;
1847
1848 case GET_ASI_DTWINX: /* Reserved for stda. */
1849 if (TARGET_LONG_BITS == 32) {
1850 gen_exception(dc, TT_ILL_INSN);
1851 break;
1852 } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
1853 /* Pre-OpenSPARC CPUs don't have these. */
1854 gen_exception(dc, TT_ILL_INSN);
1855 break;
1856 }
1857 /* In OpenSPARC T1+ CPUs, TWINX ASIs used in stores are the ST_BLKINIT_ ASIs. */
1858 /* fall through */
1859
1860 case GET_ASI_DIRECT:
1861 tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
1862 break;
1863
1864 case GET_ASI_BCOPY:
1865 assert(TARGET_LONG_BITS == 32);
1866 /*
1867 * Copy 32 bytes from the address in SRC to ADDR.
1868 *
1869 * From Ross RT625 hyperSPARC manual, section 4.6:
1870 * "Block Copy and Block Fill will work only on cache line boundaries."
1871 *
1872 * It does not specify if an unaligned address is truncated or trapped.
1873 * Previous qemu behaviour was to truncate to 4 byte alignment, which
1874 * is obviously wrong. The only place I can see this used is in the
1875 * Linux kernel which begins with page alignment, advancing by 32,
1876 * so is always aligned. Assume truncation as the simpler option.
1877 *
1878 * Since the loads and stores are paired, allow the copy to happen
1879 * in the host endianness. The copy need not be atomic.
1880 */
1881 {
1882 MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
1883 TCGv saddr = tcg_temp_new();
1884 TCGv daddr = tcg_temp_new();
1885 TCGv_i128 tmp = tcg_temp_new_i128();
1886
1887 tcg_gen_andi_tl(saddr, src, -32);
1888 tcg_gen_andi_tl(daddr, addr, -32);
1889 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1890 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1891 tcg_gen_addi_tl(saddr, saddr, 16);
1892 tcg_gen_addi_tl(daddr, daddr, 16);
1893 tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
1894 tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
1895 }
1896 break;
1897
1898 default:
1899 {
1900 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1901 TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);
1902
1903 save_state(dc);
1904 #ifdef TARGET_SPARC64
1905 gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
1906 #else
1907 {
1908 TCGv_i64 t64 = tcg_temp_new_i64();
1909 tcg_gen_extu_tl_i64(t64, src);
1910 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
1911 }
1912 #endif
1913
1914 /* A write to a TLB register may alter page maps. End the TB. */
1915 dc->npc = DYNAMIC_PC;
1916 }
1917 break;
1918 }
1919 }
1920
1921 static void gen_swap_asi(DisasContext *dc, DisasASI *da,
1922 TCGv dst, TCGv src, TCGv addr)
1923 {
1924 switch (da->type) {
1925 case GET_ASI_EXCP:
1926 break;
1927 case GET_ASI_DIRECT:
1928 tcg_gen_atomic_xchg_tl(dst, addr, src,
1929 da->mem_idx, da->memop | MO_ALIGN);
1930 break;
1931 default:
1932 /* ??? Should be DAE_invalid_asi. */
1933 gen_exception(dc, TT_DATA_ACCESS);
1934 break;
1935 }
1936 }
1937
1938 static void gen_cas_asi(DisasContext *dc, DisasASI *da,
1939 TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
1940 {
1941 switch (da->type) {
1942 case GET_ASI_EXCP:
1943 return;
1944 case GET_ASI_DIRECT:
1945 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
1946 da->mem_idx, da->memop | MO_ALIGN);
1947 break;
1948 default:
1949 /* ??? Should be DAE_invalid_asi. */
1950 gen_exception(dc, TT_DATA_ACCESS);
1951 break;
1952 }
1953 }
1954
1955 static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
1956 {
1957 switch (da->type) {
1958 case GET_ASI_EXCP:
1959 break;
1960 case GET_ASI_DIRECT:
1961 tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
1962 da->mem_idx, MO_UB);
1963 break;
1964 default:
1965 /* ??? In theory, this should raise DAE_invalid_asi.
1966 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1967 if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
1968 gen_helper_exit_atomic(tcg_env);
1969 } else {
1970 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
1971 TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
1972 TCGv_i64 s64, t64;
1973
1974 save_state(dc);
1975 t64 = tcg_temp_new_i64();
1976 gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
1977
1978 s64 = tcg_constant_i64(0xff);
1979 gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);
1980
1981 tcg_gen_trunc_i64_tl(dst, t64);
1982
1983 /* End the TB. */
1984 dc->npc = DYNAMIC_PC;
1985 }
1986 break;
1987 }
1988 }
1989
1990 static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
1991 TCGv addr, int rd)
1992 {
1993 MemOp memop = da->memop;
1994 MemOp size = memop & MO_SIZE;
1995 TCGv_i32 d32;
1996 TCGv_i64 d64, l64;
1997 TCGv addr_tmp;
1998
1999 /* TODO: Use 128-bit load/store below. */
2000 if (size == MO_128) {
2001 memop = (memop & ~MO_SIZE) | MO_64;
2002 }
2003
2004 switch (da->type) {
2005 case GET_ASI_EXCP:
2006 break;
2007
2008 case GET_ASI_DIRECT:
2009 memop |= MO_ALIGN_4;
2010 switch (size) {
2011 case MO_32:
2012 d32 = tcg_temp_new_i32();
2013 tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
2014 gen_store_fpr_F(dc, rd, d32);
2015 break;
2016
2017 case MO_64:
2018 d64 = tcg_temp_new_i64();
2019 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2020 gen_store_fpr_D(dc, rd, d64);
2021 break;
2022
2023 case MO_128:
2024 d64 = tcg_temp_new_i64();
2025 l64 = tcg_temp_new_i64();
2026 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
2027 addr_tmp = tcg_temp_new();
2028 tcg_gen_addi_tl(addr_tmp, addr, 8);
2029 tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
2030 gen_store_fpr_D(dc, rd, d64);
2031 gen_store_fpr_D(dc, rd + 2, l64);
2032 break;
2033 default:
2034 g_assert_not_reached();
2035 }
2036 break;
2037
2038 case GET_ASI_BLOCK:
2039 /* Valid for lddfa on aligned registers only. */
2040 if (orig_size == MO_64 && (rd & 7) == 0) {
2041 /* The first operation checks required alignment. */
2042 addr_tmp = tcg_temp_new();
2043 d64 = tcg_temp_new_i64();
2044 for (int i = 0; ; ++i) {
2045 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
2046 memop | (i == 0 ? MO_ALIGN_64 : 0));
2047 gen_store_fpr_D(dc, rd + 2 * i, d64);
2048 if (i == 7) {
2049 break;
2050 }
2051 tcg_gen_addi_tl(addr_tmp, addr, 8);
2052 addr = addr_tmp;
2053 }
2054 } else {
2055 gen_exception(dc, TT_ILL_INSN);
2056 }
2057 break;
2058
2059 case GET_ASI_SHORT:
2060 /* Valid for lddfa only. */
2061 if (orig_size == MO_64) {
2062 d64 = tcg_temp_new_i64();
2063 tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2064 gen_store_fpr_D(dc, rd, d64);
2065 } else {
2066 gen_exception(dc, TT_ILL_INSN);
2067 }
2068 break;
2069
2070 default:
2071 {
2072 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2073 TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
2074
2075 save_state(dc);
2076 /* According to the table in the UA2011 manual, the only
2077 other asis that are valid for ldfa/lddfa/ldqfa are
2078 the NO_FAULT asis. We still need a helper for these,
2079 but we can just use the integer asi helper for them. */
2080 switch (size) {
2081 case MO_32:
2082 d64 = tcg_temp_new_i64();
2083 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2084 d32 = tcg_temp_new_i32();
2085 tcg_gen_extrl_i64_i32(d32, d64);
2086 gen_store_fpr_F(dc, rd, d32);
2087 break;
2088 case MO_64:
2089 d64 = tcg_temp_new_i64();
2090 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2091 gen_store_fpr_D(dc, rd, d64);
2092 break;
2093 case MO_128:
2094 d64 = tcg_temp_new_i64();
2095 l64 = tcg_temp_new_i64();
2096 gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
2097 addr_tmp = tcg_temp_new();
2098 tcg_gen_addi_tl(addr_tmp, addr, 8);
2099 gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
2100 gen_store_fpr_D(dc, rd, d64);
2101 gen_store_fpr_D(dc, rd + 2, l64);
2102 break;
2103 default:
2104 g_assert_not_reached();
2105 }
2106 }
2107 break;
2108 }
2109 }
2110
2111 static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
2112 TCGv addr, int rd)
2113 {
2114 MemOp memop = da->memop;
2115 MemOp size = memop & MO_SIZE;
2116 TCGv_i32 d32;
2117 TCGv_i64 d64;
2118 TCGv addr_tmp;
2119
2120 /* TODO: Use 128-bit load/store below. */
2121 if (size == MO_128) {
2122 memop = (memop & ~MO_SIZE) | MO_64;
2123 }
2124
2125 switch (da->type) {
2126 case GET_ASI_EXCP:
2127 break;
2128
2129 case GET_ASI_DIRECT:
2130 memop |= MO_ALIGN_4;
2131 switch (size) {
2132 case MO_32:
2133 d32 = gen_load_fpr_F(dc, rd);
2134 tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
2135 break;
2136 case MO_64:
2137 d64 = gen_load_fpr_D(dc, rd);
2138 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
2139 break;
2140 case MO_128:
2141 /* Only 4-byte alignment required. However, it is legal for the
2142 cpu to signal the alignment fault, and the OS trap handler is
2143 required to fix it up. Requiring 16-byte alignment here avoids
2144 having to probe the second page before performing the first
2145 write. */
2146 d64 = gen_load_fpr_D(dc, rd);
2147 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
2148 addr_tmp = tcg_temp_new();
2149 tcg_gen_addi_tl(addr_tmp, addr, 8);
2150 d64 = gen_load_fpr_D(dc, rd + 2);
2151 tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
2152 break;
2153 default:
2154 g_assert_not_reached();
2155 }
2156 break;
2157
2158 case GET_ASI_BLOCK:
2159 /* Valid for stdfa on aligned registers only. */
2160 if (orig_size == MO_64 && (rd & 7) == 0) {
2161 /* The first operation checks required alignment. */
2162 addr_tmp = tcg_temp_new();
2163 for (int i = 0; ; ++i) {
2164 d64 = gen_load_fpr_D(dc, rd + 2 * i);
2165 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
2166 memop | (i == 0 ? MO_ALIGN_64 : 0));
2167 if (i == 7) {
2168 break;
2169 }
2170 tcg_gen_addi_tl(addr_tmp, addr, 8);
2171 addr = addr_tmp;
2172 }
2173 } else {
2174 gen_exception(dc, TT_ILL_INSN);
2175 }
2176 break;
2177
2178 case GET_ASI_SHORT:
2179 /* Valid for stdfa only. */
2180 if (orig_size == MO_64) {
2181 d64 = gen_load_fpr_D(dc, rd);
2182 tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
2183 } else {
2184 gen_exception(dc, TT_ILL_INSN);
2185 }
2186 break;
2187
2188 default:
2189 /* According to the table in the UA2011 manual, the only
2190 other asis that are valid for stfa/stdfa/stqfa are
2191 the PST* asis, which aren't currently handled. */
2192 gen_exception(dc, TT_ILL_INSN);
2193 break;
2194 }
2195 }
2196
2197 static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2198 {
2199 TCGv hi = gen_dest_gpr(dc, rd);
2200 TCGv lo = gen_dest_gpr(dc, rd + 1);
2201
2202 switch (da->type) {
2203 case GET_ASI_EXCP:
2204 return;
2205
2206 case GET_ASI_DTWINX:
2207 #ifdef TARGET_SPARC64
2208 {
2209 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2210 TCGv_i128 t = tcg_temp_new_i128();
2211
2212 tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
2213 /*
2214 * Note that LE twinx acts as if each 64-bit register result is
2215 * byte swapped. We perform one 128-bit LE load, so must swap
2216 * the order of the writebacks.
2217 */
2218 if ((mop & MO_BSWAP) == MO_TE) {
2219 tcg_gen_extr_i128_i64(lo, hi, t);
2220 } else {
2221 tcg_gen_extr_i128_i64(hi, lo, t);
2222 }
2223 }
2224 break;
2225 #else
2226 g_assert_not_reached();
2227 #endif
2228
2229 case GET_ASI_DIRECT:
2230 {
2231 TCGv_i64 tmp = tcg_temp_new_i64();
2232
2233 tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);
2234
2235 /* Note that LE ldda acts as if each 32-bit register
2236 result is byte swapped. Having just performed one
2237 64-bit bswap, we need now to swap the writebacks. */
2238 if ((da->memop & MO_BSWAP) == MO_TE) {
2239 tcg_gen_extr_i64_tl(lo, hi, tmp);
2240 } else {
2241 tcg_gen_extr_i64_tl(hi, lo, tmp);
2242 }
2243 }
2244 break;
2245
2246 case GET_ASI_CODE:
2247 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
2248 {
2249 MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
2250 TCGv_i64 tmp = tcg_temp_new_i64();
2251
2252 gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));
2253
2254 /* See above. */
2255 if ((da->memop & MO_BSWAP) == MO_TE) {
2256 tcg_gen_extr_i64_tl(lo, hi, tmp);
2257 } else {
2258 tcg_gen_extr_i64_tl(hi, lo, tmp);
2259 }
2260 }
2261 break;
2262 #else
2263 g_assert_not_reached();
2264 #endif
2265
2266 default:
2267 /* ??? In theory we've handled all of the ASIs that are valid
2268 for ldda, and this should raise DAE_invalid_asi. However,
2269 real hardware allows others. This can be seen with e.g.
2270 FreeBSD 10.3 wrt ASI_IC_TAG. */
2271 {
2272 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2273 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2274 TCGv_i64 tmp = tcg_temp_new_i64();
2275
2276 save_state(dc);
2277 gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);
2278
2279 /* See above. */
2280 if ((da->memop & MO_BSWAP) == MO_TE) {
2281 tcg_gen_extr_i64_tl(lo, hi, tmp);
2282 } else {
2283 tcg_gen_extr_i64_tl(hi, lo, tmp);
2284 }
2285 }
2286 break;
2287 }
2288
2289 gen_store_gpr(dc, rd, hi);
2290 gen_store_gpr(dc, rd + 1, lo);
2291 }
2292
2293 static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
2294 {
2295 TCGv hi = gen_load_gpr(dc, rd);
2296 TCGv lo = gen_load_gpr(dc, rd + 1);
2297
2298 switch (da->type) {
2299 case GET_ASI_EXCP:
2300 break;
2301
2302 case GET_ASI_DTWINX:
2303 #ifdef TARGET_SPARC64
2304 {
2305 MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
2306 TCGv_i128 t = tcg_temp_new_i128();
2307
2308 /*
2309 * Note that LE twinx acts as if each 64-bit register result is
2310 * byte swapped. We perform one 128-bit LE store, so must swap
2311 * the order of the construction.
2312 */
2313 if ((mop & MO_BSWAP) == MO_TE) {
2314 tcg_gen_concat_i64_i128(t, lo, hi);
2315 } else {
2316 tcg_gen_concat_i64_i128(t, hi, lo);
2317 }
2318 tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
2319 }
2320 break;
2321 #else
2322 g_assert_not_reached();
2323 #endif
2324
2325 case GET_ASI_DIRECT:
2326 {
2327 TCGv_i64 t64 = tcg_temp_new_i64();
2328
2329 /* Note that LE stda acts as if each 32-bit register result is
2330 byte swapped. We will perform one 64-bit LE store, so now
2331 we must swap the order of the construction. */
2332 if ((da->memop & MO_BSWAP) == MO_TE) {
2333 tcg_gen_concat_tl_i64(t64, lo, hi);
2334 } else {
2335 tcg_gen_concat_tl_i64(t64, hi, lo);
2336 }
2337 tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
2338 }
2339 break;
2340
2341 case GET_ASI_BFILL:
2342 assert(TARGET_LONG_BITS == 32);
2343 /*
2344 * Store 32 bytes of [rd:rd+1] to ADDR.
2345 * See comments for GET_ASI_BCOPY above.
2346 */
2347 {
2348 MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
2349 TCGv_i64 t8 = tcg_temp_new_i64();
2350 TCGv_i128 t16 = tcg_temp_new_i128();
2351 TCGv daddr = tcg_temp_new();
2352
2353 tcg_gen_concat_tl_i64(t8, lo, hi);
2354 tcg_gen_concat_i64_i128(t16, t8, t8);
2355 tcg_gen_andi_tl(daddr, addr, -32);
2356 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2357 tcg_gen_addi_tl(daddr, daddr, 16);
2358 tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
2359 }
2360 break;
2361
2362 default:
2363 /* ??? In theory we've handled all of the ASIs that are valid
2364 for stda, and this should raise DAE_invalid_asi. */
2365 {
2366 TCGv_i32 r_asi = tcg_constant_i32(da->asi);
2367 TCGv_i32 r_mop = tcg_constant_i32(da->memop);
2368 TCGv_i64 t64 = tcg_temp_new_i64();
2369
2370 /* See above. */
2371 if ((da->memop & MO_BSWAP) == MO_TE) {
2372 tcg_gen_concat_tl_i64(t64, lo, hi);
2373 } else {
2374 tcg_gen_concat_tl_i64(t64, hi, lo);
2375 }
2376
2377 save_state(dc);
2378 gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
2379 }
2380 break;
2381 }
2382 }
2383
2384 static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2385 {
2386 #ifdef TARGET_SPARC64
2387 TCGv_i32 c32, zero, dst, s1, s2;
2388 TCGv_i64 c64 = tcg_temp_new_i64();
2389
2390 /* We have two choices here: extend the 32-bit data and use movcond_i64,
2391 or fold the comparison down to 32 bits and use movcond_i32. Choose
2392 the latter. */
2393 c32 = tcg_temp_new_i32();
2394 tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
2395 tcg_gen_extrl_i64_i32(c32, c64);
2396
2397 s1 = gen_load_fpr_F(dc, rs);
2398 s2 = gen_load_fpr_F(dc, rd);
2399 dst = tcg_temp_new_i32();
2400 zero = tcg_constant_i32(0);
2401
2402 tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
2403
2404 gen_store_fpr_F(dc, rd, dst);
2405 #else
2406 qemu_build_not_reached();
2407 #endif
2408 }
2409
2410 static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2411 {
2412 #ifdef TARGET_SPARC64
2413 TCGv_i64 dst = tcg_temp_new_i64();
2414 tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
2415 gen_load_fpr_D(dc, rs),
2416 gen_load_fpr_D(dc, rd));
2417 gen_store_fpr_D(dc, rd, dst);
2418 #else
2419 qemu_build_not_reached();
2420 #endif
2421 }
2422
2423 static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
2424 {
2425 #ifdef TARGET_SPARC64
2426 TCGv c2 = tcg_constant_tl(cmp->c2);
2427 TCGv_i64 h = tcg_temp_new_i64();
2428 TCGv_i64 l = tcg_temp_new_i64();
2429
2430 tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
2431 gen_load_fpr_D(dc, rs),
2432 gen_load_fpr_D(dc, rd));
2433 tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
2434 gen_load_fpr_D(dc, rs + 2),
2435 gen_load_fpr_D(dc, rd + 2));
2436 gen_store_fpr_D(dc, rd, h);
2437 gen_store_fpr_D(dc, rd + 2, l);
2438 #else
2439 qemu_build_not_reached();
2440 #endif
2441 }
2442
2443 #ifdef TARGET_SPARC64
2444 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
2445 {
2446 TCGv_i32 r_tl = tcg_temp_new_i32();
2447
2448 /* load env->tl into r_tl */
2449 tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));
2450
2451 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2452 tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
2453
2454 /* calculate offset to current trap state from env->ts, reuse r_tl */
2455 tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
2456 tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));
2457
2458 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2459 {
2460 TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
2461 tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
2462 tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
2463 }
2464 }
2465 #endif
2466
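/*
 * SPARC64 packs double- and quad-precision register numbers into 5 bits:
 * bit 0 of the instruction field supplies bit 5 of the register number,
 * giving %d0..%d62 and %q0..%q60.
 */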
2467 static int extract_dfpreg(DisasContext *dc, int x)
2468 {
2469 int r = x & 0x1e;
2470 #ifdef TARGET_SPARC64
2471 r |= (x & 1) << 5;
2472 #endif
2473 return r;
2474 }
2475
2476 static int extract_qfpreg(DisasContext *dc, int x)
2477 {
2478 int r = x & 0x1c;
2479 #ifdef TARGET_SPARC64
2480 r |= (x & 1) << 5;
2481 #endif
2482 return r;
2483 }
2484
2485 /* Include the auto-generated decoder. */
2486 #include "decode-insns.c.inc"
2487
2488 #define TRANS(NAME, AVAIL, FUNC, ...) \
2489 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2490 { return avail_##AVAIL(dc) && FUNC(dc, ## __VA_ARGS__); }
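
/*
 * For example, TRANS(BPcc, 64, do_bpcc, a) expands to:
 *     static bool trans_BPcc(DisasContext *dc, arg_BPcc *a)
 *     { return avail_64(dc) && do_bpcc(dc, a); }
 */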
2491
2492 #define avail_ALL(C) true
2493 #ifdef TARGET_SPARC64
2494 # define avail_32(C) false
2495 # define avail_ASR17(C) false
2496 # define avail_CASA(C) true
2497 # define avail_DIV(C) true
2498 # define avail_MUL(C) true
2499 # define avail_POWERDOWN(C) false
2500 # define avail_64(C) true
2501 # define avail_FMAF(C) ((C)->def->features & CPU_FEATURE_FMAF)
2502 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2503 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2504 # define avail_IMA(C) ((C)->def->features & CPU_FEATURE_IMA)
2505 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2506 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2507 # define avail_VIS3(C) ((C)->def->features & CPU_FEATURE_VIS3)
2508 # define avail_VIS3B(C) avail_VIS3(C)
2509 # define avail_VIS4(C) ((C)->def->features & CPU_FEATURE_VIS4)
2510 #else
2511 # define avail_32(C) true
2512 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2513 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2514 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2515 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2516 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2517 # define avail_64(C) false
2518 # define avail_FMAF(C) false
2519 # define avail_GL(C) false
2520 # define avail_HYPV(C) false
2521 # define avail_IMA(C) false
2522 # define avail_VIS1(C) false
2523 # define avail_VIS2(C) false
2524 # define avail_VIS3(C) false
2525 # define avail_VIS3B(C) false
2526 # define avail_VIS4(C) false
2527 #endif
2528
2529 /*
2530 * We decoded bit 13 as imm, and bits [12:0] as rs2_or_imm.
2531 * For v9, if !imm, then the unused bits [12:5] must be zero.
2532 * For v7 and v8, the unused bits are ignored; clear them here.
2533 */
2534 static bool check_rs2(DisasContext *dc, int *rs2)
2535 {
2536 if (unlikely(*rs2 & ~0x1f)) {
2537 if (avail_64(dc)) {
2538 return false;
2539 }
2540 *rs2 &= 0x1f;
2541 }
2542 return true;
2543 }
2544
2545 static bool check_r_r_ri(DisasContext *dc, arg_r_r_ri *a)
2546 {
2547 return a->imm || check_rs2(dc, &a->rs2_or_imm);
2548 }
2549
2550 static bool check_r_r_ri_cc(DisasContext *dc, arg_r_r_ri_cc *a)
2551 {
2552 return a->imm || check_rs2(dc, &a->rs2_or_imm);
2553 }
2554
2555 /* Default case for non-jump instructions. */
2556 static bool advance_pc(DisasContext *dc)
2557 {
2558 TCGLabel *l1;
2559
2560 finishing_insn(dc);
2561
2562 if (dc->npc & 3) {
2563 switch (dc->npc) {
2564 case DYNAMIC_PC:
2565 case DYNAMIC_PC_LOOKUP:
2566 dc->pc = dc->npc;
2567 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2568 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2569 break;
2570
2571 case JUMP_PC:
2572 /* we can do a static jump */
2573 l1 = gen_new_label();
2574 tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);
2575
2576 /* jump not taken */
2577 gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);
2578
2579 /* jump taken */
2580 gen_set_label(l1);
2581 gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);
2582
2583 dc->base.is_jmp = DISAS_NORETURN;
2584 break;
2585
2586 default:
2587 g_assert_not_reached();
2588 }
2589 } else {
2590 dc->pc = dc->npc;
2591 dc->npc = dc->npc + 4;
2592 }
2593 return true;
2594 }
2595
2596 /*
2597 * Major opcodes 00 and 01 -- branches, call, and sethi
2598 */
2599
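/*
 * The delay slot at npc normally executes before the branch target.
 * With the annul bit set, a conditional branch that is not taken skips
 * the delay slot, and an unconditional "ba,a" skips it entirely.
 */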
2600 static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
2601 bool annul, int disp)
2602 {
2603 target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
2604 target_ulong npc;
2605
2606 finishing_insn(dc);
2607
2608 if (cmp->cond == TCG_COND_ALWAYS) {
2609 if (annul) {
2610 dc->pc = dest;
2611 dc->npc = dest + 4;
2612 } else {
2613 gen_mov_pc_npc(dc);
2614 dc->npc = dest;
2615 }
2616 return true;
2617 }
2618
2619 if (cmp->cond == TCG_COND_NEVER) {
2620 npc = dc->npc;
2621 if (npc & 3) {
2622 gen_mov_pc_npc(dc);
2623 if (annul) {
2624 tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
2625 }
2626 tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
2627 } else {
2628 dc->pc = npc + (annul ? 4 : 0);
2629 dc->npc = dc->pc + 4;
2630 }
2631 return true;
2632 }
2633
2634 flush_cond(dc);
2635 npc = dc->npc;
2636
2637 if (annul) {
2638 TCGLabel *l1 = gen_new_label();
2639
2640 tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
2641 gen_goto_tb(dc, 0, npc, dest);
2642 gen_set_label(l1);
2643 gen_goto_tb(dc, 1, npc + 4, npc + 8);
2644
2645 dc->base.is_jmp = DISAS_NORETURN;
2646 } else {
2647 if (npc & 3) {
2648 switch (npc) {
2649 case DYNAMIC_PC:
2650 case DYNAMIC_PC_LOOKUP:
2651 tcg_gen_mov_tl(cpu_pc, cpu_npc);
2652 tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
2653 tcg_gen_movcond_tl(cmp->cond, cpu_npc,
2654 cmp->c1, tcg_constant_tl(cmp->c2),
2655 tcg_constant_tl(dest), cpu_npc);
2656 dc->pc = npc;
2657 break;
2658 default:
2659 g_assert_not_reached();
2660 }
2661 } else {
2662 dc->pc = npc;
2663 dc->npc = JUMP_PC;
2664 dc->jump = *cmp;
2665 dc->jump_pc[0] = dest;
2666 dc->jump_pc[1] = npc + 4;
2667
2668 /* The condition for cpu_cond is always NE -- normalize. */
2669 if (cmp->cond == TCG_COND_NE) {
2670 tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
2671 } else {
2672 tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
2673 }
2674 dc->cpu_cond_live = true;
2675 }
2676 }
2677 return true;
2678 }
2679
2680 static bool raise_priv(DisasContext *dc)
2681 {
2682 gen_exception(dc, TT_PRIV_INSN);
2683 return true;
2684 }
2685
2686 static bool raise_unimpfpop(DisasContext *dc)
2687 {
2688 gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
2689 return true;
2690 }
2691
2692 static bool gen_trap_float128(DisasContext *dc)
2693 {
2694 if (dc->def->features & CPU_FEATURE_FLOAT128) {
2695 return false;
2696 }
2697 return raise_unimpfpop(dc);
2698 }
2699
2700 static bool do_bpcc(DisasContext *dc, arg_bcc *a)
2701 {
2702 DisasCompare cmp;
2703
2704 gen_compare(&cmp, a->cc, a->cond, dc);
2705 return advance_jump_cond(dc, &cmp, a->a, a->i);
2706 }
2707
2708 TRANS(Bicc, ALL, do_bpcc, a)
2709 TRANS(BPcc, 64, do_bpcc, a)
2710
2711 static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
2712 {
2713 DisasCompare cmp;
2714
2715 if (gen_trap_if_nofpu_fpexception(dc)) {
2716 return true;
2717 }
2718 gen_fcompare(&cmp, a->cc, a->cond);
2719 return advance_jump_cond(dc, &cmp, a->a, a->i);
2720 }
2721
2722 TRANS(FBPfcc, 64, do_fbpfcc, a)
2723 TRANS(FBfcc, ALL, do_fbpfcc, a)
2724
2725 static bool trans_BPr(DisasContext *dc, arg_BPr *a)
2726 {
2727 DisasCompare cmp;
2728
2729 if (!avail_64(dc)) {
2730 return false;
2731 }
2732 if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
2733 return false;
2734 }
2735 return advance_jump_cond(dc, &cmp, a->a, a->i);
2736 }
2737
2738 static bool trans_CALL(DisasContext *dc, arg_CALL *a)
2739 {
2740 target_long target = address_mask_i(dc, dc->pc + a->i * 4);
2741
2742 gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
2743 gen_mov_pc_npc(dc);
2744 dc->npc = target;
2745 return true;
2746 }
2747
2748 static bool trans_NCP(DisasContext *dc, arg_NCP *a)
2749 {
2750 /*
2751 * For sparc32, always generate the no-coprocessor exception.
2752 * For sparc64, always generate an illegal instruction exception.
2753 */
2754 #ifdef TARGET_SPARC64
2755 return false;
2756 #else
2757 gen_exception(dc, TT_NCP_INSN);
2758 return true;
2759 #endif
2760 }
2761
2762 static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
2763 {
2764 /* Special-case %g0 because that's the canonical nop. */
2765 if (a->rd) {
2766 gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
2767 }
2768 return advance_pc(dc);
2769 }
2770
2771 /*
2772 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2773 */
2774
2775 static bool do_tcc(DisasContext *dc, int cond, int cc,
2776 int rs1, bool imm, int rs2_or_imm)
2777 {
2778 int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
2779 ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
2780 DisasCompare cmp;
2781 TCGLabel *lab;
2782 TCGv_i32 trap;
2783
2784 /* Trap never. */
2785 if (cond == 0) {
2786 return advance_pc(dc);
2787 }
2788
2789 /*
2790 * Immediate traps are the most common case. Since this value is
2791 * live across the branch, it really pays to evaluate the constant.
2792 */
2793 if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
2794 trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
2795 } else {
2796 trap = tcg_temp_new_i32();
2797 tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
2798 if (imm) {
2799 tcg_gen_addi_i32(trap, trap, rs2_or_imm);
2800 } else {
2801 TCGv_i32 t2 = tcg_temp_new_i32();
2802 tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
2803 tcg_gen_add_i32(trap, trap, t2);
2804 }
2805 tcg_gen_andi_i32(trap, trap, mask);
2806 tcg_gen_addi_i32(trap, trap, TT_TRAP);
2807 }
2808
2809 finishing_insn(dc);
2810
2811 /* Trap always. */
2812 if (cond == 8) {
2813 save_state(dc);
2814 gen_helper_raise_exception(tcg_env, trap);
2815 dc->base.is_jmp = DISAS_NORETURN;
2816 return true;
2817 }
2818
2819 /* Conditional trap. */
2820 flush_cond(dc);
2821 lab = delay_exceptionv(dc, trap);
2822 gen_compare(&cmp, cc, cond, dc);
2823 tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);
2824
2825 return advance_pc(dc);
2826 }
2827
2828 static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
2829 {
2830 if (avail_32(dc) && a->cc) {
2831 return false;
2832 }
2833 return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
2834 }
2835
2836 static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
2837 {
2838 if (avail_64(dc)) {
2839 return false;
2840 }
2841 return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
2842 }
2843
2844 static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
2845 {
2846 if (avail_32(dc)) {
2847 return false;
2848 }
2849 return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
2850 }
2851
2852 static bool do_stbar(DisasContext *dc)
2853 {
2854 tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2855 return advance_pc(dc);
2856 }
2857
2858 TRANS(STBAR_v8, 32, do_stbar)
2859 TRANS(STBAR_v9, 64, do_stbar)
2860
2861 static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
2862 {
2863 if (avail_32(dc)) {
2864 return false;
2865 }
2866 if (a->mmask) {
2867 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
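        /* For example, "membar #StoreStore" yields mmask == TCG_MO_ST_ST. */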
2868 tcg_gen_mb(a->mmask | TCG_BAR_SC);
2869 }
2870 if (a->cmask) {
2871 /* For #Sync, etc, end the TB to recognize interrupts. */
2872 dc->base.is_jmp = DISAS_EXIT;
2873 }
2874 return advance_pc(dc);
2875 }
2876
2877 static bool do_rd_special(DisasContext *dc, bool priv, int rd,
2878 TCGv (*func)(DisasContext *, TCGv))
2879 {
2880 if (!priv) {
2881 return raise_priv(dc);
2882 }
2883 gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
2884 return advance_pc(dc);
2885 }
2886
2887 static TCGv do_rdy(DisasContext *dc, TCGv dst)
2888 {
2889 return cpu_y;
2890 }
2891
2892 TRANS(RDY_v7, 32, do_rd_special, true, a->rd, do_rdy)
2893 TRANS(RDY_v9, 64, do_rd_special, true, a->rd, do_rdy)
2894
2895 static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
2896 {
2897 gen_helper_rdasr17(dst, tcg_env);
2898 return dst;
2899 }
2900
2901 TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
2902
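/* %pic, the performance instrumentation counter, reads as zero here. */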
2903 static TCGv do_rdpic(DisasContext *dc, TCGv dst)
2904 {
2905 return tcg_constant_tl(0);
2906 }
2907
2908 TRANS(RDPIC, HYPV, do_rd_special, supervisor(dc), a->rd, do_rdpic)
2909
2910
2911 static TCGv do_rdccr(DisasContext *dc, TCGv dst)
2912 {
2913 gen_helper_rdccr(dst, tcg_env);
2914 return dst;
2915 }
2916
2917 TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
2918
2919 static TCGv do_rdasi(DisasContext *dc, TCGv dst)
2920 {
2921 #ifdef TARGET_SPARC64
2922 return tcg_constant_tl(dc->asi);
2923 #else
2924 qemu_build_not_reached();
2925 #endif
2926 }
2927
2928 TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)
2929
2930 static TCGv do_rdtick(DisasContext *dc, TCGv dst)
2931 {
2932 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2933
2934 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
2935 if (translator_io_start(&dc->base)) {
2936 dc->base.is_jmp = DISAS_EXIT;
2937 }
2938 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2939 tcg_constant_i32(dc->mem_idx));
2940 return dst;
2941 }
2942
2943 /* TODO: non-priv access only allowed when enabled. */
2944 TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)
2945
2946 static TCGv do_rdpc(DisasContext *dc, TCGv dst)
2947 {
2948 return tcg_constant_tl(address_mask_i(dc, dc->pc));
2949 }
2950
2951 TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)
2952
2953 static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
2954 {
2955 tcg_gen_ext_i32_tl(dst, cpu_fprs);
2956 return dst;
2957 }
2958
2959 TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)
2960
2961 static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
2962 {
2963 gen_trap_ifnofpu(dc);
2964 return cpu_gsr;
2965 }
2966
2967 TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
2968
2969 static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
2970 {
2971 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
2972 return dst;
2973 }
2974
2975 TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)
2976
2977 static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
2978 {
2979 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
2980 return dst;
2981 }
2982
2983 /* TODO: non-priv access only allowed when enabled. */
2984 TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)
2985
2986 static TCGv do_rdstick(DisasContext *dc, TCGv dst)
2987 {
2988 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
2989
2990 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
2991 if (translator_io_start(&dc->base)) {
2992 dc->base.is_jmp = DISAS_EXIT;
2993 }
2994 gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
2995 tcg_constant_i32(dc->mem_idx));
2996 return dst;
2997 }
2998
2999 /* TODO: non-priv access only allowed when enabled. */
3000 TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)
3001
3002 static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
3003 {
3004 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
3005 return dst;
3006 }
3007
3008 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3009 TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)
3010
3011 /*
3012 * UltraSPARC-T1 Strand status.
3013 * The HYPV check may not be enough: UA2005 & UA2007 describe
3014 * this ASR as implementation dependent.
3015 */
3016 static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
3017 {
3018 return tcg_constant_tl(1);
3019 }
3020
3021 TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)
3022
3023 static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
3024 {
3025 gen_helper_rdpsr(dst, tcg_env);
3026 return dst;
3027 }
3028
3029 TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
3030
3031 static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
3032 {
3033 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
3034 return dst;
3035 }
3036
3037 TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)
3038
3039 static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
3040 {
3041 TCGv_i32 tl = tcg_temp_new_i32();
3042 TCGv_ptr tp = tcg_temp_new_ptr();
3043
3044 tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
3045 tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
3046 tcg_gen_shli_i32(tl, tl, 3);
3047 tcg_gen_ext_i32_ptr(tp, tl);
3048 tcg_gen_add_ptr(tp, tp, tcg_env);
3049
3050 tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
3051 return dst;
3052 }
3053
3054 TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)
3055
3056 static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
3057 {
3058 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
3059 return dst;
3060 }
3061
3062 TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)
3063
3064 static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
3065 {
3066 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
3067 return dst;
3068 }
3069
3070 TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)
3071
3072 static TCGv do_rdhver(DisasContext *dc, TCGv dst)
3073 {
3074 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
3075 return dst;
3076 }
3077
3078 TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)
3079
3080 static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
3081 {
3082 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
3083 return dst;
3084 }
3085
3086 TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
3087 do_rdhstick_cmpr)
3088
3089 static TCGv do_rdwim(DisasContext *dc, TCGv dst)
3090 {
3091 tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
3092 return dst;
3093 }
3094
3095 TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
3096
3097 static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
3098 {
3099 #ifdef TARGET_SPARC64
3100 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3101
3102 gen_load_trap_state_at_tl(r_tsptr);
3103 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
3104 return dst;
3105 #else
3106 qemu_build_not_reached();
3107 #endif
3108 }
3109
3110 TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)
3111
3112 static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
3113 {
3114 #ifdef TARGET_SPARC64
3115 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3116
3117 gen_load_trap_state_at_tl(r_tsptr);
3118 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
3119 return dst;
3120 #else
3121 qemu_build_not_reached();
3122 #endif
3123 }
3124
3125 TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)
3126
3127 static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
3128 {
3129 #ifdef TARGET_SPARC64
3130 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3131
3132 gen_load_trap_state_at_tl(r_tsptr);
3133 tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
3134 return dst;
3135 #else
3136 qemu_build_not_reached();
3137 #endif
3138 }
3139
3140 TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)
3141
3142 static TCGv do_rdtt(DisasContext *dc, TCGv dst)
3143 {
3144 #ifdef TARGET_SPARC64
3145 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3146
3147 gen_load_trap_state_at_tl(r_tsptr);
3148 tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
3149 return dst;
3150 #else
3151 qemu_build_not_reached();
3152 #endif
3153 }
3154
3155 TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
3156 TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
3157
3158 static TCGv do_rdtba(DisasContext *dc, TCGv dst)
3159 {
3160 return cpu_tbr;
3161 }
3162
3163 TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3164 TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)
3165
3166 static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
3167 {
3168 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
3169 return dst;
3170 }
3171
3172 TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)
3173
3174 static TCGv do_rdtl(DisasContext *dc, TCGv dst)
3175 {
3176 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
3177 return dst;
3178 }
3179
3180 TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
3181
3182 static TCGv do_rdpil(DisasContext *dc, TCGv dst)
3183 {
3184 tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
3185 return dst;
3186 }
3187
3188 TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
3189
3190 static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
3191 {
3192 gen_helper_rdcwp(dst, tcg_env);
3193 return dst;
3194 }
3195
3196 TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)
3197
3198 static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
3199 {
3200 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
3201 return dst;
3202 }
3203
3204 TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)
3205
3206 static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
3207 {
3208 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
3209 return dst;
3210 }
3211
3212 TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
3213 do_rdcanrestore)
3214
3215 static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
3216 {
3217 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
3218 return dst;
3219 }
3220
3221 TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)
3222
3223 static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
3224 {
3225 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
3226 return dst;
3227 }
3228
3229 TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)
3230
3231 static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
3232 {
3233 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
3234 return dst;
3235 }
3236
3237 TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)
3238
3239 static TCGv do_rdgl(DisasContext *dc, TCGv dst)
3240 {
3241 tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
3242 return dst;
3243 }
3244
3245 TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
3246
3247 /* UA2005 strand status */
3248 static TCGv do_rdssr(DisasContext *dc, TCGv dst)
3249 {
3250 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
3251 return dst;
3252 }
3253
3254 TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)
3255
3256 static TCGv do_rdver(DisasContext *dc, TCGv dst)
3257 {
3258 tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
3259 return dst;
3260 }
3261
3262 TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
3263
3264 static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
3265 {
3266 if (avail_64(dc)) {
3267 gen_helper_flushw(tcg_env);
3268 return advance_pc(dc);
3269 }
3270 return false;
3271 }
3272
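/*
 * Note that WRASR and WRPR architecturally write rs1 ^ reg_or_imm,
 * so the xor below is part of the instruction, not an optimization.
 */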
3273 static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
3274 void (*func)(DisasContext *, TCGv))
3275 {
3276 TCGv src;
3277
3278 if (!check_r_r_ri(dc, a)) {
3279 return false;
3280 }
3281 if (!priv) {
3282 return raise_priv(dc);
3283 }
3284
3285 if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
3286 src = tcg_constant_tl(a->rs2_or_imm);
3287 } else {
3288 TCGv src1 = gen_load_gpr(dc, a->rs1);
3289 if (a->rs2_or_imm == 0) {
3290 src = src1;
3291 } else {
3292 src = tcg_temp_new();
3293 if (a->imm) {
3294 tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
3295 } else {
3296 tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
3297 }
3298 }
3299 }
3300 func(dc, src);
3301 return advance_pc(dc);
3302 }
3303
3304 static void do_wry(DisasContext *dc, TCGv src)
3305 {
3306 tcg_gen_ext32u_tl(cpu_y, src);
3307 }
3308
3309 TRANS(WRY, ALL, do_wr_special, a, true, do_wry)
3310
3311 static void do_wrccr(DisasContext *dc, TCGv src)
3312 {
3313 gen_helper_wrccr(tcg_env, src);
3314 }
3315
3316 TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)
3317
3318 static void do_wrasi(DisasContext *dc, TCGv src)
3319 {
3320 TCGv tmp = tcg_temp_new();
3321
3322 tcg_gen_ext8u_tl(tmp, src);
3323 tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
3324 /* End TB to notice changed ASI. */
3325 dc->base.is_jmp = DISAS_EXIT;
3326 }
3327
3328 TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)
3329
3330 static void do_wrfprs(DisasContext *dc, TCGv src)
3331 {
3332 #ifdef TARGET_SPARC64
3333 tcg_gen_trunc_tl_i32(cpu_fprs, src);
3334 dc->fprs_dirty = 0;
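    /* FPRS.FEF gates the FPU, so exit the TB for the change to be seen. */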
3335 dc->base.is_jmp = DISAS_EXIT;
3336 #else
3337 qemu_build_not_reached();
3338 #endif
3339 }
3340
3341 TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
3342
3343 static bool do_priv_nop(DisasContext *dc, bool priv)
3344 {
3345 if (!priv) {
3346 return raise_priv(dc);
3347 }
3348 return advance_pc(dc);
3349 }
3350
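/* Writes to %pcr and %pic are accepted but ignored in this model. */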
3351 TRANS(WRPCR, HYPV, do_priv_nop, supervisor(dc))
3352 TRANS(WRPIC, HYPV, do_priv_nop, supervisor(dc))
3353
3354 static void do_wrgsr(DisasContext *dc, TCGv src)
3355 {
3356 gen_trap_ifnofpu(dc);
3357 tcg_gen_mov_tl(cpu_gsr, src);
3358 }
3359
3360 TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)
3361
3362 static void do_wrsoftint_set(DisasContext *dc, TCGv src)
3363 {
3364 gen_helper_set_softint(tcg_env, src);
3365 }
3366
3367 TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)
3368
3369 static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
3370 {
3371 gen_helper_clear_softint(tcg_env, src);
3372 }
3373
3374 TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)
3375
3376 static void do_wrsoftint(DisasContext *dc, TCGv src)
3377 {
3378 gen_helper_write_softint(tcg_env, src);
3379 }
3380
3381 TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
3382
3383 static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
3384 {
3385 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3386
3387 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
3388 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3389 translator_io_start(&dc->base);
3390 gen_helper_tick_set_limit(r_tickptr, src);
3391 /* End TB to handle timer interrupt */
3392 dc->base.is_jmp = DISAS_EXIT;
3393 }
3394
3395 TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
3396
3397 static void do_wrstick(DisasContext *dc, TCGv src)
3398 {
3399 #ifdef TARGET_SPARC64
3400 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3401
3402 tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
3403 translator_io_start(&dc->base);
3404 gen_helper_tick_set_count(r_tickptr, src);
3405 /* End TB to handle timer interrupt */
3406 dc->base.is_jmp = DISAS_EXIT;
3407 #else
3408 qemu_build_not_reached();
3409 #endif
3410 }
3411
3412 TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)
3413
3414 static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
3415 {
3416 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3417
3418 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
3419 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
3420 translator_io_start(&dc->base);
3421 gen_helper_tick_set_limit(r_tickptr, src);
3422 /* End TB to handle timer interrupt */
3423 dc->base.is_jmp = DISAS_EXIT;
3424 }
3425
3426 TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
3427
3428 static void do_wrpowerdown(DisasContext *dc, TCGv src)
3429 {
3430 finishing_insn(dc);
3431 save_state(dc);
3432 gen_helper_power_down(tcg_env);
3433 }
3434
3435 TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)
3436
3437 static void do_wrmwait(DisasContext *dc, TCGv src)
3438 {
3439 /*
3440 * TODO: This is a stub version of mwait, which merely recognizes
3441 * interrupts immediately and does not wait.
3442 */
3443 dc->base.is_jmp = DISAS_EXIT;
3444 }
3445
3446 TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)
3447
3448 static void do_wrpsr(DisasContext *dc, TCGv src)
3449 {
3450 gen_helper_wrpsr(tcg_env, src);
3451 dc->base.is_jmp = DISAS_EXIT;
3452 }
3453
3454 TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
3455
3456 static void do_wrwim(DisasContext *dc, TCGv src)
3457 {
3458 target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
3459 TCGv tmp = tcg_temp_new();
3460
3461 tcg_gen_andi_tl(tmp, src, mask);
3462 tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
3463 }
3464
3465 TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
3466
3467 static void do_wrtpc(DisasContext *dc, TCGv src)
3468 {
3469 #ifdef TARGET_SPARC64
3470 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3471
3472 gen_load_trap_state_at_tl(r_tsptr);
3473 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
3474 #else
3475 qemu_build_not_reached();
3476 #endif
3477 }
3478
3479 TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)
3480
3481 static void do_wrtnpc(DisasContext *dc, TCGv src)
3482 {
3483 #ifdef TARGET_SPARC64
3484 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3485
3486 gen_load_trap_state_at_tl(r_tsptr);
3487 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
3488 #else
3489 qemu_build_not_reached();
3490 #endif
3491 }
3492
3493 TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)
3494
3495 static void do_wrtstate(DisasContext *dc, TCGv src)
3496 {
3497 #ifdef TARGET_SPARC64
3498 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3499
3500 gen_load_trap_state_at_tl(r_tsptr);
3501 tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
3502 #else
3503 qemu_build_not_reached();
3504 #endif
3505 }
3506
3507 TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)
3508
3509 static void do_wrtt(DisasContext *dc, TCGv src)
3510 {
3511 #ifdef TARGET_SPARC64
3512 TCGv_ptr r_tsptr = tcg_temp_new_ptr();
3513
3514 gen_load_trap_state_at_tl(r_tsptr);
3515 tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
3516 #else
3517 qemu_build_not_reached();
3518 #endif
3519 }
3520
3521 TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
3522
3523 static void do_wrtick(DisasContext *dc, TCGv src)
3524 {
3525 TCGv_ptr r_tickptr = tcg_temp_new_ptr();
3526
3527 tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
3528 translator_io_start(&dc->base);
3529 gen_helper_tick_set_count(r_tickptr, src);
3530 /* End TB to handle timer interrupt */
3531 dc->base.is_jmp = DISAS_EXIT;
3532 }
3533
3534 TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)
3535
3536 static void do_wrtba(DisasContext *dc, TCGv src)
3537 {
3538 tcg_gen_mov_tl(cpu_tbr, src);
3539 }
3540
3541 TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)
3542
3543 static void do_wrpstate(DisasContext *dc, TCGv src)
3544 {
3545 save_state(dc);
3546 if (translator_io_start(&dc->base)) {
3547 dc->base.is_jmp = DISAS_EXIT;
3548 }
3549 gen_helper_wrpstate(tcg_env, src);
3550 dc->npc = DYNAMIC_PC;
3551 }
3552
3553 TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)
3554
3555 static void do_wrtl(DisasContext *dc, TCGv src)
3556 {
3557 save_state(dc);
3558 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
3559 dc->npc = DYNAMIC_PC;
3560 }
3561
3562 TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
3563
3564 static void do_wrpil(DisasContext *dc, TCGv src)
3565 {
3566 if (translator_io_start(&dc->base)) {
3567 dc->base.is_jmp = DISAS_EXIT;
3568 }
3569 gen_helper_wrpil(tcg_env, src);
3570 }
3571
3572 TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)
3573
3574 static void do_wrcwp(DisasContext *dc, TCGv src)
3575 {
3576 gen_helper_wrcwp(tcg_env, src);
3577 }
3578
3579 TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
3580
3581 static void do_wrcansave(DisasContext *dc, TCGv src)
3582 {
3583 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
3584 }
3585
3586 TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)
3587
3588 static void do_wrcanrestore(DisasContext *dc, TCGv src)
3589 {
3590 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
3591 }
3592
3593 TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)
3594
3595 static void do_wrcleanwin(DisasContext *dc, TCGv src)
3596 {
3597 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
3598 }
3599
3600 TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)
3601
3602 static void do_wrotherwin(DisasContext *dc, TCGv src)
3603 {
3604 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
3605 }
3606
3607 TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)
3608
3609 static void do_wrwstate(DisasContext *dc, TCGv src)
3610 {
3611 tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
3612 }
3613
3614 TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)
3615
3616 static void do_wrgl(DisasContext *dc, TCGv src)
3617 {
3618 gen_helper_wrgl(tcg_env, src);
3619 }
3620
3621 TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
3622
3623 /* UA2005 strand status */
3624 static void do_wrssr(DisasContext *dc, TCGv src)
3625 {
3626 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
3627 }
3628
TRANS(WRPR_strand_status,HYPV,do_wr_special,a,hypervisor (dc),do_wrssr)3629 TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)
3630
3631 TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)
3632
3633 static void do_wrhpstate(DisasContext *dc, TCGv src)
3634 {
3635 tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
3636 dc->base.is_jmp = DISAS_EXIT;
3637 }
3638
TRANS(WRHPR_hpstate,HYPV,do_wr_special,a,hypervisor (dc),do_wrhpstate)3639 TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
3640
3641 static void do_wrhtstate(DisasContext *dc, TCGv src)
3642 {
3643 TCGv_i32 tl = tcg_temp_new_i32();
3644 TCGv_ptr tp = tcg_temp_new_ptr();
3645
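    /* Address env->htstate[tl & MAXTL_MASK]: each entry is 8 bytes. */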
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)

static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)

static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    if (!check_r_r_ri_cc(dc, a)) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

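    /* With logic_cc, Z mirrors the result in N; C and V are cleared. */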
    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (!check_r_r_ri_cc(dc, a)) {
            return false;
        }
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    if (!check_r_r_ri(dc, a)) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        finishing_insn(dc);
        flush_cond(dc);

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

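    /*
     * The 64-bit dividend is Y (high half) : rs1 (low half).
     * The quotient saturates to UINT32_MAX on overflow.
     */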
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    if (!check_r_r_ri(dc, a)) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    if (!check_r_r_ri(dc, a)) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv t1, t2;

        finishing_insn(dc);
        flush_cond(dc);

        lab = delay_exception(dc, TT_DIV_ZERO);
        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

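    /*
     * l and r index the s1/s2 elements within an aligned 8-byte word
     * (r inverted); m holds one mask bit per element.
     */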
    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        abort();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l & r : l) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)

static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)

static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)

static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
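    /* The little-endian variant stores the negated offset in GSR.align. */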
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)

static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return true;
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)

static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
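        /*
         * 64-bit cpu with a 32-bit shift: fold the shift and the
         * 32-bit zero/sign extension into a single deposit/extract.
         */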
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)

static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    if (!imm && !check_rs2(dc, &rs2_or_imm)) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}

static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
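    /*
     * Begin with the old value of rd, so that movcond can
     * preserve it when the condition is false.
     */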
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    if (!check_r_r_ri(dc, a)) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)

/*
 * Major opcode 11 -- load and store instructions
 */

static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    if (!imm && !check_rs2(dc, &rs2_or_imm)) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
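    /* Truncate the effective address to 32 bits when address masking applies. */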
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}

static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)

static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)

static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

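    /* SWAP exchanges rd with the 32-bit word at the effective address. */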
    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

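    /*
     * rs2 supplies the comparison value; rd supplies the swap value
     * and receives the old memory contents.
     */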
    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)

static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, 64, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    TCGv addr;

    if (!avail_32(dc)) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!dc->fsr_qne) {
        gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
        return true;
    }

    /* Store the single element from the queue. */
    TCGv_i64 fq = tcg_temp_new_i64();
    tcg_gen_ld_i64(fq, tcg_env, offsetof(CPUSPARCState, fq.d));
    tcg_gen_qemu_st_i64(fq, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN_4);

    /* Mark the queue empty, transitioning to fp_execute state. */
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_qne));
    dc->fsr_qne = 0;

    return advance_pc(dc);
#else
    qemu_build_not_reached();
#endif
}

static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}

static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

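    /*
     * hi aliases cpu_fcc[3]: extract fcc1 and fcc2 from it before
     * the final in-place extract of fcc3.
     */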
    lo = tcg_temp_new_i32();
    hi = cpu_fcc[3];
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    /* Store insns are ok in fp_exception_pending state. */
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)

static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)

static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)

static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)

static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)

static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

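    /* Each operand is one 64-bit register: oprsz = maxsz = 8 bytes. */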
    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)

static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)

static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)

static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)

static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fnsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)

static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)

static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

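    /* rd supplies the first data operand; the gpr rs1 holds the offset. */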
5418 dst = tcg_temp_new_i64();
5419 src1 = gen_load_fpr_D(dc, a->rd);
5420 src2 = gen_load_fpr_D(dc, a->rs2);
5421 src3 = gen_load_gpr(dc, a->rs1);
5422 gen_op_faligndata_i(dst, src1, src2, src3);
5423 gen_store_fpr_D(dc, a->rd, dst);
5424 return advance_pc(dc);
5425 }
5426
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)

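/* FdMULq: multiply two doubles, producing a quad result. */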
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

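/*
 * Conditional FP moves on an integer register condition (FMOVR).
 * gen_compare_reg returns false for reserved condition encodings,
 * which we report as an illegal instruction.
 */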
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)

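/* Conditional FP moves on the integer condition codes (FMOVcc). */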
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)

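/* Conditional FP moves on a floating-point condition code (FMOVfcc). */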
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)

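/*
 * FCMPs/FCMPEs: compare two singles into %fcc[cc].  Only %fcc0 exists
 * on 32-bit cpus.  The 'e' variant additionally signals an invalid
 * operation when either operand is a quiet NaN.
 */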
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)

static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)

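/* As do_fcmps/do_fcmpd above, but with 128-bit quad operands. */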
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_if_nofpu_fpexception(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)

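/* VIS3 FLCMPs/FLCMPd: compare two FP registers, writing the result into %fcc[cc]. */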
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    return advance_pc(dc);
}

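/*
 * MOVsTOsw, MOVsTOuw, MOVdTOx: copy an FP register into an integer
 * register by loading directly from its slot in env.
 */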
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)

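/*
 * MOVwTOs, MOVxTOd: the reverse direction, storing an integer
 * register directly into an FP register slot.
 */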
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)

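/*
 * Unpack cs_base (which carries the next PC) and the TB flags into
 * the DisasContext at the start of translation.
 */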
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
# ifdef TARGET_SPARC64
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
# else
    dc->fsr_qne = (dc->base.tb->flags & TB_FLAG_FSR_QNE) != 0;
# endif
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#endif
    /*
     * If we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page.
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

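/*
 * Record pc/npc for this instruction.  An npc with low bits set
 * encodes one of the symbolic values (JUMP_PC, DYNAMIC_PC,
 * DYNAMIC_PC_LOOKUP) rather than a real address.
 */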
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

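/*
 * Fetch and decode one 4-byte instruction, raising TT_ILL_INSN if the
 * decoder rejects it.  If pc no longer tracks pc_next, control flow
 * has changed and the TB must end.
 */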
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

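/*
 * Close the TB.  If both pc and npc are static we can chain directly
 * to the next TB; otherwise write back whichever values are dynamic
 * and exit via lookup_and_goto_ptr or exit_tb.  Finally, emit the
 * out-of-line code for exceptions raised from delay slots.
 */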
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start = sparc_tr_tb_start,
    .insn_start = sparc_tr_insn_start,
    .translate_insn = sparc_tr_translate_insn,
    .tb_stop = sparc_tr_tb_stop,
};

void sparc_translate_code(CPUState *cs, TranslationBlock *tb,
                          int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

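/*
 * Create the TCG globals that mirror the CPU state.  cpu_regs[0]
 * stays NULL because %g0 always reads as zero; the in/local/out
 * registers live in the current register window and are addressed
 * indirectly through regwptr.
 */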
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}