/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* True if generating pc-relative code. */
    bool pcrel;

    /* implver and amask values for this CPU. */
    int implver;
    int amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;

    /* The set of registers active in the current context. */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination. */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
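
/*
 * Note on UNALIGN: in user-only mode the required alignment comes from the
 * translation flags via ctx->unalign, so unaligned-access emulation can be
 * enabled or disabled per process.  In system mode every access is flagged
 * MO_ALIGN, and a misaligned address raises the architectural unaligned
 * access fault for the guest OS to fix up.
 */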

/* Target-specific return values from translate_one, indicating the
   state of the TB. Note that DISAS_NEXT indicates that we are not
   exiting the TB. */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
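
/*
 * The env->flags word packs several one-byte fields, each identified
 * elsewhere by its bit shift within the word.  Converting the shift to a
 * byte offset, with the host-endian fixup above, lets a flag be read or
 * written with a single one-byte load/store instead of a read-modify-write
 * of the whole word.
 */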

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}

static void gen_pc_disp(DisasContext *ctx, TCGv dest, int32_t disp)
{
    uint64_t addr = ctx->base.pc_next + disp;
    if (ctx->pcrel) {
        tcg_gen_addi_i64(dest, cpu_pc, addr - ctx->base.pc_first);
    } else {
        tcg_gen_movi_i64(dest, addr);
    }
}
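
/*
 * With pc-relative translation the same TB may execute at several virtual
 * addresses, so absolute PCs cannot be baked into the generated code.
 * At TB entry cpu_pc holds the real current PC, which equals pc_first only
 * for the address at which the TB was first translated; the target of
 * "current insn + disp" is therefore formed as
 *     cpu_pc + (pc_next + disp - pc_first)
 * i.e. a runtime base plus a compile-time offset, which is position
 * independent.
 */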

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(tcg_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    gen_pc_disp(ctx, cpu_pc, 0);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops. */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}

static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP. Other various loads are forms of
       prefetches, which we can treat as nops. No worries about
       missed exceptions here. */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
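
/*
 * For the "locked" loads (LDL_L/LDQ_L) the address and the loaded value are
 * recorded in cpu_lock_addr/cpu_lock_value.  A later STL_C/STQ_C uses them
 * to emulate the load-locked/store-conditional pair with a compare-and-swap;
 * see gen_store_conditional below.
 */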

static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}

static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}

static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
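
/*
 * Store-conditional is emulated with an atomic compare-and-swap: if the
 * address still matches cpu_lock_addr, the cmpxchg stores the new value only
 * when memory still holds cpu_lock_value, and ra is set to 1 on success and
 * 0 on failure.  This may succeed in cases where real hardware would fail
 * (e.g. the location was rewritten with the same value), which the
 * architecture's forward-progress rules tolerate.  Clearing cpu_lock_addr
 * to -1 afterwards makes a store-conditional without a preceding locked
 * load fail.
 */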

static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp)
{
    if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) {
        /* With PCREL, PC must always be up-to-date. */
        if (ctx->pcrel) {
            gen_pc_disp(ctx, cpu_pc, disp);
            tcg_gen_goto_tb(idx);
        } else {
            tcg_gen_goto_tb(idx);
            gen_pc_disp(ctx, cpu_pc, disp);
        }
        tcg_gen_exit_tb(ctx->base.tb, idx);
    } else {
        gen_pc_disp(ctx, cpu_pc, disp);
        tcg_gen_lookup_and_goto_ptr();
    }
}
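
/*
 * The ordering above matters: in pcrel mode the PC update reads cpu_pc and
 * must be emitted before goto_tb, so that the value is correct even when
 * the jump is later chained directly to the next TB.  In the absolute case
 * the movi is placed after goto_tb, so once the TBs are chained the (then
 * dead) PC store is skipped along with the exit.
 */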

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    if (ra != 31) {
        gen_pc_disp(ctx, ctx->ir[ra], 0);
    }

    /* Notice branch-to-next; used to initialize RA with the PC. */
    if (disp == 0) {
        return DISAS_NEXT;
    }
    gen_goto_tb(ctx, 0, disp);
    return DISAS_NORETURN;
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, uint64_t imm, int32_t disp)
{
    TCGLabel *lab_true = gen_new_label();

    tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);
    gen_goto_tb(ctx, 0, 0);
    gen_set_label(lab_true);
    gen_goto_tb(ctx, 1, disp);

    return DISAS_NORETURN;
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp)
{
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
                              is_tst_cond(cond), disp);
}

/* Fold -0.0 for comparison with COND. */

static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
    TCGv_i64 tmp;

    *pimm = 0;
    switch (*pcond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        return src;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can compare without the sign bit. */
        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
        *pimm = INT64_MAX;
        return src;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0. */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
                            src, tcg_constant_i64(INT64_MIN),
                            tcg_constant_i64(0), src);
        return tmp;

    default:
        g_assert_not_reached();
    }
}
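
/*
 * The FP branch and cmov instructions test the raw IEEE bit pattern of the
 * register, so -0.0 (only the sign bit set) must behave like +0.0.  The
 * TSTEQ/TSTNE against INT64_MAX checks "all bits below the sign are zero",
 * which is true exactly for the two zero encodings, and the movcond above
 * rewrites an exact -0.0 pattern (INT64_MIN) to +0.0 before a signed
 * >= or < compare.
 */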

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
                        tmp, tcg_constant_i64(imm),
                        load_fpr(ctx, rb), load_fpr(ctx, rc));
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
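
/*
 * The rounding mode (and the flush-to-zero setting below) is tracked per
 * translation block in ctx->tb_rm/ctx->tb_ftz: a run of FP instructions
 * with the same qualifiers stores to fp_status only once, at the first
 * instruction, and the early return above skips redundant updates for
 * the rest of the TB.
 */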

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(tcg_env, val);
            } else {
                gen_helper_ieee_input(tcg_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware. In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec. */
            gen_helper_ieee_input_s(tcg_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either. */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps. A simple userspace emulation
       does not require this. We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(tcg_env, ign, reg);
    }
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64. */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
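
/*
 * CVTLQ undoes the in-register longword layout used by the FP unit, where
 * (per the architecture manual) bits <31:30> of the integer sit at <63:62>
 * of the register and bits <29:0> sit at <58:29>.  The shri recovers the
 * low 30 bits, the sari recovers bits <31:30> along with the sign
 * extension, and the deposit merges the two halves.
 */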

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), tcg_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output. */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that. */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, tcg_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, tcg_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact. Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        helper(vc, tcg_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, tcg_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
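
/*
 * Each bit of the ZAPNOT literal selects one byte of the 64-bit value to
 * keep.  E.g. zapnot_mask(0x0f) == 0x00000000ffffffff (keep the low four
 * bytes) and zapnot_mask(0x81) == 0xff000000000000ff.
 */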

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND. This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

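/*
 * The EXT/INS/MSK byte-manipulation families below all follow the same
 * pattern: byte_mask selects the affected bytes, the low three bits of rb
 * (or of the literal) give a byte shift, and a zapnot trims the result.
 * Where a variable shift of a full 64 bits must produce zero (which TCG
 * shifts do not guarantee), the shift amount is computed as
 * (~(rb * 8) & 63), i.e. the desired count minus one, followed by one
 * more constant shift of 1; see gen_ins_h and gen_msk_h.
 */
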
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end. This
           is equivalent to simply performing the zap first and shifting
           afterward. */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~(B * 8) & 63. */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode. Many of these are trivial access
       to internal cpu registers. */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU. */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU. */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU. */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits. */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user. */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away. */
            gen_pc_disp(ctx, cpu_pc, 0);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
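
        /* The CPUState fields used below live in the AlphaCPU container
           that embeds env; subtracting offsetof(AlphaCPU, env) converts
           the env pointer held in tcg_env into a pointer to the containing
           CPU object, from which cpu_index and halted can be reached. */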
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t entry;

        gen_pc_disp(ctx, tmp, 0);
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            tcg_gen_ori_i64(tmp, tmp, 1);
        } else {
            st_flag_byte(tcg_constant_i64(1), ENV_FLAG_PAL_SHIFT);
        }
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

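        /* PALcode entry points sit at fixed offsets from PALBR:
           privileged calls dispatch to palbr + 0x1000 + 64 * palcode,
           unprivileged ones to palbr + 0x2000 + 64 * (palcode - 0x80);
           e.g. callsys (0x83) enters at palbr + 0x20c0. */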
        entry = ctx->palbr;
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

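/* cpu_pr_data returns the env offset of a processor register, with the
   PR_LONG bit set for registers stored as 32 bits; a result of zero means
   the register is unassigned (read-zero, write-ignore). */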
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case 3: return offsetof(CPUAlphaState, trap_arg0);
    case 4: return offsetof(CPUAlphaState, trap_arg1);
    case 5: return offsetof(CPUAlphaState, trap_arg2);
    case 6: return offsetof(CPUAlphaState, exc_addr);
    case 7: return offsetof(CPUAlphaState, palbr);
    case 8: return offsetof(CPUAlphaState, ptbr);
    case 9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, tcg_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL. Since the base register usually only
           changes during boot, flushing everything works well. */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers. */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore. */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21) * 4;
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads. */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL. */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ. */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0. */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
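
        /* For the 64-bit ADDQ/V and SUBQ/V below there is no wider type
           to compute into, so signed overflow is detected from the sign
           bits directly: it occurred iff the operands' signs permit it
           (equal for add, different for sub) and the result's sign differs
           from va's.  Bit 63 of (~(va ^ vb) & (va ^ vc)), respectively
           ((va ^ vb) & (va ^ vc)), is exactly that predicate, and
           check_overflow traps when the shifted-down bit is nonzero. */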
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP. */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV. */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT. */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x16:
            /* CMOVLBC */
            tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
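
        /* For MULQ/V, muls2 produces the full 128-bit signed product
           (low half in vc, high half in tmp); overflow occurred iff the
           high half is not simply the sign extension of the low half. */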
1914 case 0x60:
1915 /* MULQ/V */
1916 tmp = tcg_temp_new();
1917 tmp2 = tcg_temp_new();
1918 tcg_gen_muls2_i64(vc, tmp, va, vb);
1919 tcg_gen_sari_i64(tmp2, vc, 63);
1920 gen_helper_check_overflow(tcg_env, tmp, tmp2);
1921 break;
1922 default:
1923 goto invalid_opc;
1924 }
1925 break;
1926
1927 case 0x14:
1928 REQUIRE_AMASK(FIX);
1929 vc = dest_fpr(ctx, rc);
1930 switch (fpfn) { /* fn11 & 0x3F */
1931 case 0x04:
1932 /* ITOFS */
1933 REQUIRE_REG_31(rb);
1934 REQUIRE_FEN;
1935 t32 = tcg_temp_new_i32();
1936 va = load_gpr(ctx, ra);
1937 tcg_gen_extrl_i64_i32(t32, va);
1938 gen_helper_memory_to_s(vc, t32);
1939 break;
1940 case 0x0A:
1941 /* SQRTF */
1942 REQUIRE_REG_31(ra);
1943 REQUIRE_FEN;
1944 vb = load_fpr(ctx, rb);
1945 gen_helper_sqrtf(vc, tcg_env, vb);
1946 break;
1947 case 0x0B:
1948 /* SQRTS */
1949 REQUIRE_REG_31(ra);
1950 REQUIRE_FEN;
1951 gen_sqrts(ctx, rb, rc, fn11);
1952 break;
1953 case 0x14:
1954 /* ITOFF */
1955 REQUIRE_REG_31(rb);
1956 REQUIRE_FEN;
1957 t32 = tcg_temp_new_i32();
1958 va = load_gpr(ctx, ra);
1959 tcg_gen_extrl_i64_i32(t32, va);
1960 gen_helper_memory_to_f(vc, t32);
1961 break;
1962 case 0x24:
1963 /* ITOFT */
1964 REQUIRE_REG_31(rb);
1965 REQUIRE_FEN;
1966 va = load_gpr(ctx, ra);
1967 tcg_gen_mov_i64(vc, va);
1968 break;
1969 case 0x2A:
1970 /* SQRTG */
1971 REQUIRE_REG_31(ra);
1972 REQUIRE_FEN;
1973 vb = load_fpr(ctx, rb);
1974 gen_helper_sqrtg(vc, tcg_env, vb);
1975 break;
1976 case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, tcg_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, tcg_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, tcg_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, tcg_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, tcg_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, tcg_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, tcg_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, tcg_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, tcg_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, tcg_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, tcg_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, tcg_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, tcg_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, tcg_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, tcg_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP. */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV. */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(tcg_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding. */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, tcg_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, tcg_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
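            /* Reading the cycle counter is treated as an I/O access
               when icount is enabled, presumably so the reported
               count stays exact; in that case end the TB here. */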
            if (translator_io_start(&ctx->base)) {
                ret = DISAS_PC_STALE;
            }
            gen_helper_load_pcc(va, tcg_env);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
           prediction stack action, which of course we don't implement. */
        vb = load_gpr(ctx, rb);
        if (ra != 31) {
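            /* Copy the masked target first: ra may equal rb, and the
               link write below would clobber it. */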
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, vb, ~3);
            gen_pc_disp(ctx, ctx->ir[ra], 0);
            tcg_gen_mov_i64(cpu_pc, tmp);
        } else {
            tcg_gen_andi_i64(cpu_pc, vb, ~3);
        }
        ret = DISAS_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            }
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x1C:
        vc = dest_gpr(ctx, rc);
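        /* FTOIT and FTOIS take their source from the fp register
           file, so handle them before the integer operand load
           below. */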
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR. This turns out to be useful for our
               emulation PALcode, so continue to accept it. */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
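        /* Returning from PALcode: discard any outstanding lock and
           the RX flag; bit 0 of the target address selects PAL mode. */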
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away. */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
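        /* The conditional store succeeds only if the lock recorded
           by the matching LDL_L/LDQ_L is still intact. */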
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
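        /* BLBC/BLBS branch on the low bit of Ra; these map onto
           TCG's test-under-mask conditions. */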
        ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}

static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = alpha_env_mmu_index(env);
    ctx->pcrel = ctx->base.tb->cflags & CF_PCREL;
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB. Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default. We could even (gasp) dynamically figure out
       what default would be most efficient given the running program. */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero. */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page. */
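    /* -(pc | TARGET_PAGE_MASK) is the number of bytes remaining on
       the current page. */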
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

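    /* With CF_PCREL the recorded pc is page-relative, allowing the
       TB to be reused at other virtual addresses. */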
    if (ctx->pcrel) {
        tcg_gen_insn_start(dcbase->pc_next & ~TARGET_PAGE_MASK);
    } else {
        tcg_gen_insn_start(dcbase->pc_next);
    }
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn = translator_ldl(cpu_env(cpu), &ctx->base,
                                   ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}

static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, 0);
        break;
    case DISAS_PC_STALE:
        gen_pc_disp(ctx, cpu_pc, 0);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
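        /* Exit to the main loop without chaining so that pending
           interrupts are recognized immediately. */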
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}