xref: /openbmc/qemu/target/alpha/translate.c (revision d53106c997e5c8e61e37ae9ff9f0e1f243b03968)
1 /*
2  *  Alpha emulation cpu translation for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
37 #undef ALPHA_DEBUG_DISAS
38 #define CONFIG_SOFTFLOAT_INLINE
39 
40 #ifdef ALPHA_DEBUG_DISAS
41 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
42 #else
43 #  define LOG_DISAS(...) do { } while (0)
44 #endif
45 
/* Per-translation-block disassembly state for the Alpha front end. */
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    /* Alignment requirement applied to user-mode memory accesses. */
    MemOp unalign;
#else
    /* PAL base register; system emulation only. */
    uint64_t palbr;
#endif
    /* TB flags copied from the CPU state at TB start. */
    uint32_t tbflags;
    /* MMU index used for memory accesses in this TB. */
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};
74 
75 #ifdef CONFIG_USER_ONLY
76 #define UNALIGN(C)  (C)->unalign
77 #else
78 #define UNALIGN(C)  MO_ALIGN
79 #endif
80 
81 /* Target-specific return values from translate_one, indicating the
82    state of the TB.  Note that DISAS_NEXT indicates that we are not
83    exiting the TB.  */
84 #define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
85 #define DISAS_PC_UPDATED          DISAS_TARGET_1
86 #define DISAS_PC_STALE            DISAS_TARGET_2
87 
88 /* global register indexes */
89 static TCGv cpu_std_ir[31];
90 static TCGv cpu_fir[31];
91 static TCGv cpu_pc;
92 static TCGv cpu_lock_addr;
93 static TCGv cpu_lock_value;
94 
95 #ifndef CONFIG_USER_ONLY
96 static TCGv cpu_pal_ir[31];
97 #endif
98 
99 #include "exec/gen-icount.h"
100 
/*
 * Allocate the TCG globals that back the Alpha CPU state: the 31
 * integer and 31 fp registers, the PALmode shadow registers (system
 * emulation only), and pc/lock_addr/lock_value.  Called once at init.
 */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    /* In PALmode, registers 8-14 and 25 are replaced by shadow registers. */
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
164 
165 static TCGv load_zero(DisasContext *ctx)
166 {
167     if (!ctx->zero) {
168         ctx->zero = tcg_constant_i64(0);
169     }
170     return ctx->zero;
171 }
172 
173 static TCGv dest_sink(DisasContext *ctx)
174 {
175     if (!ctx->sink) {
176         ctx->sink = tcg_temp_new();
177     }
178     return ctx->sink;
179 }
180 
181 static void free_context_temps(DisasContext *ctx)
182 {
183     if (ctx->sink) {
184         tcg_gen_discard_i64(ctx->sink);
185         ctx->sink = NULL;
186     }
187 }
188 
189 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
190 {
191     if (likely(reg < 31)) {
192         return ctx->ir[reg];
193     } else {
194         return load_zero(ctx);
195     }
196 }
197 
198 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
199                          uint8_t lit, bool islit)
200 {
201     if (islit) {
202         return tcg_constant_i64(lit);
203     } else if (likely(reg < 31)) {
204         return ctx->ir[reg];
205     } else {
206         return load_zero(ctx);
207     }
208 }
209 
210 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
211 {
212     if (likely(reg < 31)) {
213         return ctx->ir[reg];
214     } else {
215         return dest_sink(ctx);
216     }
217 }
218 
219 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
220 {
221     if (likely(reg < 31)) {
222         return cpu_fir[reg];
223     } else {
224         return load_zero(ctx);
225     }
226 }
227 
228 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
229 {
230     if (likely(reg < 31)) {
231         return cpu_fir[reg];
232     } else {
233         return dest_sink(ctx);
234     }
235 }
236 
/*
 * Byte offset within CPUAlphaState of the flag byte selected by SHIFT,
 * a bit position (multiple of 8) within the 32-bit "flags" word.
 */
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    /* Bytes within the word are laid out in reverse on BE hosts.  */
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
247 
/* Load (zero-extended) the flag byte selected by SHIFT into VAL.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}
252 
/* Store the low byte of VAL into the flag byte selected by SHIFT.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
257 
258 static void gen_excp_1(int exception, int error_code)
259 {
260     TCGv_i32 tmp1, tmp2;
261 
262     tmp1 = tcg_constant_i32(exception);
263     tmp2 = tcg_constant_i32(error_code);
264     gen_helper_excp(cpu_env, tmp1, tmp2);
265 }
266 
/* Raise EXCEPTION/ERROR_CODE at the current insn address; ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
273 
/* Raise an illegal-opcode (OPCDEC) exception for the current insn.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
278 
/* LDF: load 32-bit VAX F-format and expand to the 64-bit register form.  */
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}
285 
/* LDG: load 64-bit VAX G-format and convert to the register form.  */
static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}
292 
/* LDS: load 32-bit IEEE single and expand to the 64-bit register form.  */
static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}
299 
/* LDT: IEEE double matches the register format; plain 64-bit load.  */
static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
304 
305 static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
306                         void (*func)(DisasContext *, TCGv, TCGv))
307 {
308     /* Loads to $f31 are prefetches, which we can treat as nops. */
309     if (likely(ra != 31)) {
310         TCGv addr = tcg_temp_new();
311         tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
312         func(ctx, cpu_fir[ra], addr);
313     }
314 }
315 
/*
 * Common code for the integer load insns: compute rb+disp16, optionally
 * clear the low 3 bits (LDQ_U), perform the load, and for LDx_L record
 * the locked address/value for a later store-conditional.
 */
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        /* Locked accesses keep strict alignment; others honor UNALIGN.  */
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        /* Remember the address and loaded value for STx_C.  */
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
344 
345 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
346 {
347     TCGv_i32 tmp32 = tcg_temp_new_i32();
348     gen_helper_f_to_memory(tmp32, addr);
349     tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
350 }
351 
/* STG: convert register form to 64-bit VAX G-format memory form, store.  */
static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
358 
/* STS: compress register form to 32-bit IEEE single memory form, store.  */
static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}
365 
/* STT: IEEE double matches the register format; plain 64-bit store.  */
static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
370 
371 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
372                          void (*func)(DisasContext *, TCGv, TCGv))
373 {
374     TCGv addr = tcg_temp_new();
375     tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
376     func(ctx, load_fpr(ctx, ra), addr);
377 }
378 
/*
 * Common code for the integer store insns: compute rb+disp16, optionally
 * clear the low 3 bits (STQ_U), then store register ra ($31 reads zero).
 */
static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}
395 
/*
 * STL_C/STQ_C: store-conditional.  Succeeds (ra := 1) only if the
 * computed address matches cpu_lock_addr and an atomic cmpxchg finds
 * the remembered cpu_lock_value still in memory; otherwise ra := 0.
 * The lock is always invalidated afterwards.
 */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    /* The branches below leave the straight-line path; drop the sink.  */
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    /* Atomically compare memory at lock_addr with lock_value and, if
       equal, replace it with the new data; VAL gets the old contents.  */
    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        /* ra := 1 if the exchange succeeded (old value matched).  */
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* Invalidate the lock regardless of the outcome.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
430 
/* True if a direct (chained) jump to DEST may be used from this TB.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
435 
436 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
437 {
438     uint64_t dest = ctx->base.pc_next + (disp << 2);
439 
440     if (ra != 31) {
441         tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
442     }
443 
444     /* Notice branch-to-next; used to initialize RA with the PC.  */
445     if (disp == 0) {
446         return 0;
447     } else if (use_goto_tb(ctx, dest)) {
448         tcg_gen_goto_tb(0);
449         tcg_gen_movi_i64(cpu_pc, dest);
450         tcg_gen_exit_tb(ctx->base.tb, 0);
451         return DISAS_NORETURN;
452     } else {
453         tcg_gen_movi_i64(cpu_pc, dest);
454         return DISAS_PC_UPDATED;
455     }
456 }
457 
/*
 * Emit a conditional branch on (CMP cond 0) to pc_next + disp*4.
 * When TB chaining is allowed, emit a two-way goto_tb; otherwise
 * select the new PC with a movcond and let the main loop dispatch.
 */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Fall-through: continue at the next insn.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken: continue at the branch target.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        /* pc = (cmp cond 0) ? dest : pc_next.  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
486 
487 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
488                                int32_t disp, int mask)
489 {
490     if (mask) {
491         TCGv tmp = tcg_temp_new();
492         DisasJumpType ret;
493 
494         tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
495         ret = gen_bcond_internal(ctx, cond, tmp, disp);
496         return ret;
497     }
498     return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
499 }
500 
501 /* Fold -0.0 for comparison with COND.  */
502 
/*
 * Rewrite SRC into DEST so that an integer comparison of DEST against
 * zero with COND yields the IEEE result for SRC, treating -0.0 (sign
 * bit only) as equal to +0.0.
 */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        /* Only the six conditions above are generated for fp branches.  */
        abort();
    }
}
532 
533 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
534                                 int32_t disp)
535 {
536     TCGv cmp_tmp = tcg_temp_new();
537     DisasJumpType ret;
538 
539     gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
540     ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
541     return ret;
542 }
543 
/* FCMOVxx: if (fa COND 0.0), copy fb to fc; -0.0 treated as +0.0.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    /* fc = (va cond 0) ? fb : fc (unchanged).  */
    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
}
555 
556 #define QUAL_RM_N       0x080   /* Round mode nearest even */
557 #define QUAL_RM_C       0x000   /* Round mode chopped */
558 #define QUAL_RM_M       0x040   /* Round mode minus infinity */
559 #define QUAL_RM_D       0x0c0   /* Round mode dynamic */
560 #define QUAL_RM_MASK    0x0c0
561 
562 #define QUAL_U          0x100   /* Underflow enable (fp output) */
563 #define QUAL_V          0x100   /* Overflow enable (int output) */
564 #define QUAL_S          0x400   /* Software completion enable */
565 #define QUAL_I          0x200   /* Inexact detection enable */
566 
/*
 * Apply the rounding-mode qualifier bits from FN11 to fp_status,
 * skipping the store when the mode already matches what this TB has
 * established (tracked in ctx->tb_rm).
 */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: take the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
604 
/*
 * Apply the underflow (/U) qualifier from FN11 to fp_status's
 * flush-to-zero flag, skipping the store when the setting already
 * matches what this TB has established (tracked in ctx->tb_ftz).
 */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
632 
/*
 * Fetch fp register REG as an IEEE input operand, emitting the input
 * validation helper appropriate to the /S qualifier and to whether the
 * consumer is a comparison (IS_CMP).  $f31 reads as zero, unchecked.
 */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
658 
/*
 * After an fp operation writing register RC, raise any accumulated fp
 * exceptions not masked by the FN11 qualifiers (/U, /V, /I, /S).
 */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
690 
/*
 * CVTLQ: convert the longword-in-fp-register layout (bits <63:62,58:29>)
 * back to a sign-extended 64-bit integer in VC.
 */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
701 
/*
 * Common code for two-operand IEEE insns (SQRT/CVT family): set up the
 * rounding/FTZ qualifiers, validate the input, call HELPER, and raise
 * any unmasked fp exceptions on behalf of destination RC.
 */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
716 
/* Expand a gen_<name> wrapper around gen_ieee_arith2 for each 2-op insn.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
727 
/* CVTTQ: IEEE double to quadword integer conversion.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
746 
747 static void gen_ieee_intcvt(DisasContext *ctx,
748                             void (*helper)(TCGv, TCGv_ptr, TCGv),
749                             int rb, int rc, int fn11)
750 {
751     TCGv vb, vc;
752 
753     gen_qual_roundmode(ctx, fn11);
754     vb = load_fpr(ctx, rb);
755     vc = dest_fpr(ctx, rc);
756 
757     /* The only exception that can be raised by integer conversion
758        is inexact.  Thus we only need to worry about exceptions when
759        inexact handling is requested.  */
760     if (fn11 & QUAL_I) {
761         helper(vc, cpu_env, vb);
762         gen_fp_exc_raise(rc, fn11);
763     } else {
764         helper(vc, cpu_env, vb);
765     }
766 }
767 
/* Expand a gen_<name> wrapper around gen_ieee_intcvt per conversion.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
776 
/*
 * CPYS family building block:
 *   vc = ((inv_a ? ~va : va) & mask) | (vb & ~mask)
 * i.e. take the MASK bits from (possibly inverted) VA, the rest from VB.
 */
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}
791 
/*
 * Common code for three-operand IEEE arithmetic (ADD/SUB/MUL/DIV):
 * apply qualifiers, validate both inputs, call HELPER, raise exceptions.
 */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
808 
/* Expand a gen_<name> wrapper around gen_ieee_arith3 for each 3-op insn.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
823 
/*
 * Common code for IEEE comparisons (CMPTxx): validate inputs with the
 * comparison variant of input checking (no rounding/FTZ needed), call
 * HELPER, and raise unmasked fp exceptions.
 */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
837 
/* Expand a gen_<name> wrapper around gen_ieee_compare per comparison.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
848 
/*
 * Expand an 8-bit ZAPNOT byte-select literal into a 64-bit mask:
 * bit i of LIT set => byte i (bits 8i..8i+7) of the result is 0xff.
 */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;

    /* Build from the most significant byte down, shifting as we go.  */
    for (int bit = 7; bit >= 0; --bit) {
        mask <<= 8;
        if (lit & (1u << bit)) {
            mask |= 0xff;
        }
    }
    return mask;
}
861 
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.
   The common literals are special-cased to cheaper TCG extensions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        /* No bytes kept: result is zero.  */
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        /* Keep byte 0 only: zero-extend from 8 bits.  */
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        /* Keep bytes 0-1: zero-extend from 16 bits.  */
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        /* Keep bytes 0-3: zero-extend from 32 bits.  */
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        /* All bytes kept: plain move.  */
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
888 
/* EXTWH, EXTLH, EXTQH: extract the high part of a datum spanning an
   unaligned quadword boundary, i.e. shift VA left by (64 - 8*rb<2:0>)
   and zap to BYTE_MASK.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            /* The shift pushes everything we would keep out of range.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count = (64 - rb*8) mod 64, computed as (-rb*8) & 63.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
910 
/* EXTBL, EXTWL, EXTLL, EXTQL: extract the low part of a datum, i.e.
   shift VA right by 8*rb<2:0> and zap to BYTE_MASK.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            /* Clamp the field to the top of the quadword.  */
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
930 
/* INSWH, INSLH, INSQH: insert, producing the high half of the datum,
   i.e. zap VA to BYTE_MASK then shift right by (64 - 8*rb<2:0>).  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* Nothing of the masked field crosses into the high half.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
966 
/* INSBL, INSWL, INSLL, INSQL: insert, producing the low half of the
   datum, i.e. zap VA to BYTE_MASK then shift left by 8*rb<2:0>.  */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            /* Clamp the field to the top of the quadword.  */
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
993 
994 /* MSKWH, MSKLH, MSKQH */
995 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
996                       uint8_t lit, uint8_t byte_mask)
997 {
998     if (islit) {
999         gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1000     } else {
1001         TCGv shift = tcg_temp_new();
1002         TCGv mask = tcg_temp_new();
1003 
1004         /* The instruction description is as above, where the byte_mask
1005            is shifted left, and then we extract bits <15:8>.  This can be
1006            emulated with a right-shift on the expanded byte mask.  This
1007            requires extra care because for an input <2:0> == 0 we need a
1008            shift of 64 bits in order to generate a zero.  This is done by
1009            splitting the shift into two parts, the variable shift - 1
1010            followed by a constant 1 shift.  The code we expand below is
1011            equivalent to ~(B * 8) & 63.  */
1012 
1013         tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1014         tcg_gen_not_i64(shift, shift);
1015         tcg_gen_andi_i64(shift, shift, 0x3f);
1016         tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1017         tcg_gen_shr_i64(mask, mask, shift);
1018         tcg_gen_shri_i64(mask, mask, 1);
1019 
1020         tcg_gen_andc_i64(vc, va, mask);
1021     }
1022 }
1023 
1024 /* MSKBL, MSKWL, MSKLL, MSKQL */
1025 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1026                       uint8_t lit, uint8_t byte_mask)
1027 {
1028     if (islit) {
1029         gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1030     } else {
1031         TCGv shift = tcg_temp_new();
1032         TCGv mask = tcg_temp_new();
1033 
1034         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1035         tcg_gen_shli_i64(shift, shift, 3);
1036         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1037         tcg_gen_shl_i64(mask, mask, shift);
1038 
1039         tcg_gen_andc_i64(vc, va, mask);
1040     }
1041 }
1042 
/* Generate code for the RS/RC PALcode hooks: read the current RX flag
   byte into Ra (skipped when Ra is the zero register, r31), then store
   SET as the new flag value.  */
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
1051 
/* Translate a CALL_PAL instruction.  Trivial PALcode entry points are
   inlined as direct loads/stores on CPUAlphaState; anything else falls
   through to do_call_pal, which either raises EXCP_CALL_PAL (user mode)
   or jumps into the PALcode image at the PALBR-derived entry point.
   Returns the DisasJumpType that ends (or continues) the current TB.  */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            /* Return the per-process unique value in v0.  */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            /* Set the per-process unique value from a0.  */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            /* Return the cpu index, read from the CPUState that precedes
               env within AlphaCPU (hence the negative offset).  */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            /* Halt the cpu (set CPUState.halted) and exit via the
               EXCP_HALTED exception; v0 is cleared first.  */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* Record the return address; bit 0 of exc_addr marks that we
           were already in PAL mode, otherwise enter PAL mode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        /* Each entry point is 64 bytes; unprivileged calls (0x80..) live
           at PALBR + 0x2000, privileged ones at PALBR + 0x1000.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
1192 
1193 #ifndef CONFIG_USER_ONLY
1194 
1195 #define PR_LONG         0x200000
1196 
1197 static int cpu_pr_data(int pr)
1198 {
1199     switch (pr) {
1200     case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1201     case  3: return offsetof(CPUAlphaState, trap_arg0);
1202     case  4: return offsetof(CPUAlphaState, trap_arg1);
1203     case  5: return offsetof(CPUAlphaState, trap_arg2);
1204     case  6: return offsetof(CPUAlphaState, exc_addr);
1205     case  7: return offsetof(CPUAlphaState, palbr);
1206     case  8: return offsetof(CPUAlphaState, ptbr);
1207     case  9: return offsetof(CPUAlphaState, vptptr);
1208     case 10: return offsetof(CPUAlphaState, unique);
1209     case 11: return offsetof(CPUAlphaState, sysval);
1210     case 12: return offsetof(CPUAlphaState, usp);
1211 
1212     case 40 ... 63:
1213         return offsetof(CPUAlphaState, scratch[pr - 40]);
1214 
1215     case 251:
1216         return offsetof(CPUAlphaState, alarm_expire);
1217     }
1218     return 0;
1219 }
1220 
/* Translate MFPR (move from processor register): load processor
   register REGNO into va.  Most registers are simple loads from
   CPUAlphaState via cpu_pr_data(); timer registers go through helpers
   and may end the TB when icount is active.  */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        /* With icount enabled, bracket the timer read with gen_io_start
           and end the TB (DISAS_PC_STALE) so the I/O access is properly
           accounted -- NOTE(review): presumed icount convention.  */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            /* Longword register: sign-extending 32-bit load.  */
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
1271 
/* Translate MTPR (move to processor register): write vb to processor
   register REGNO.  Several registers have side effects (TLB flushes,
   halting, alarms, PALBR changes) that force the TB to end; otherwise
   returns DISAS_NEXT.  */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        /* Invalidate all TLB entries.  */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        /* Invalidate the TLB entry for the address in vb.  */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Halt the cpu (set CPUState.halted) and exit the TB via the
           EXCP_HALTED exception.  */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        /* With icount enabled, mark the I/O access and end the TB --
           NOTE(review): presumed icount convention.  */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                /* Longword register: truncating 32-bit store.  */
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
1346 #endif /* !USER_ONLY*/
1347 
/* Reject the instruction if the encoding's literal bit was actually set
   (real_islit); used by forms that only accept a register operand.  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless the CPU's amask advertises FLAG.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless FLAG is set in the TB flags.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless register field WHICH is r31/f31.  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Raise a FEN fault if floating-point is disabled for this TB.  */
#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
1382 
1383 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1384 {
1385     int32_t disp21, disp16, disp12 __attribute__((unused));
1386     uint16_t fn11;
1387     uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1388     bool islit, real_islit;
1389     TCGv va, vb, vc, tmp, tmp2;
1390     TCGv_i32 t32;
1391     DisasJumpType ret;
1392 
1393     /* Decode all instruction fields */
1394     opc = extract32(insn, 26, 6);
1395     ra = extract32(insn, 21, 5);
1396     rb = extract32(insn, 16, 5);
1397     rc = extract32(insn, 0, 5);
1398     real_islit = islit = extract32(insn, 12, 1);
1399     lit = extract32(insn, 13, 8);
1400 
1401     disp21 = sextract32(insn, 0, 21);
1402     disp16 = sextract32(insn, 0, 16);
1403     disp12 = sextract32(insn, 0, 12);
1404 
1405     fn11 = extract32(insn, 5, 11);
1406     fpfn = extract32(insn, 5, 6);
1407     fn7 = extract32(insn, 5, 7);
1408 
1409     if (rb == 31 && !islit) {
1410         islit = true;
1411         lit = 0;
1412     }
1413 
1414     ret = DISAS_NEXT;
1415     switch (opc) {
1416     case 0x00:
1417         /* CALL_PAL */
1418         ret = gen_call_pal(ctx, insn & 0x03ffffff);
1419         break;
1420     case 0x01:
1421         /* OPC01 */
1422         goto invalid_opc;
1423     case 0x02:
1424         /* OPC02 */
1425         goto invalid_opc;
1426     case 0x03:
1427         /* OPC03 */
1428         goto invalid_opc;
1429     case 0x04:
1430         /* OPC04 */
1431         goto invalid_opc;
1432     case 0x05:
1433         /* OPC05 */
1434         goto invalid_opc;
1435     case 0x06:
1436         /* OPC06 */
1437         goto invalid_opc;
1438     case 0x07:
1439         /* OPC07 */
1440         goto invalid_opc;
1441 
1442     case 0x09:
1443         /* LDAH */
1444         disp16 = (uint32_t)disp16 << 16;
1445         /* fall through */
1446     case 0x08:
1447         /* LDA */
1448         va = dest_gpr(ctx, ra);
1449         /* It's worth special-casing immediate loads.  */
1450         if (rb == 31) {
1451             tcg_gen_movi_i64(va, disp16);
1452         } else {
1453             tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1454         }
1455         break;
1456 
1457     case 0x0A:
1458         /* LDBU */
1459         REQUIRE_AMASK(BWX);
1460         gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
1461         break;
1462     case 0x0B:
1463         /* LDQ_U */
1464         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
1465         break;
1466     case 0x0C:
1467         /* LDWU */
1468         REQUIRE_AMASK(BWX);
1469         gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
1470         break;
1471     case 0x0D:
1472         /* STW */
1473         REQUIRE_AMASK(BWX);
1474         gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
1475         break;
1476     case 0x0E:
1477         /* STB */
1478         REQUIRE_AMASK(BWX);
1479         gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
1480         break;
1481     case 0x0F:
1482         /* STQ_U */
1483         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
1484         break;
1485 
1486     case 0x10:
1487         vc = dest_gpr(ctx, rc);
1488         vb = load_gpr_lit(ctx, rb, lit, islit);
1489 
1490         if (ra == 31) {
1491             if (fn7 == 0x00) {
1492                 /* Special case ADDL as SEXTL.  */
1493                 tcg_gen_ext32s_i64(vc, vb);
1494                 break;
1495             }
1496             if (fn7 == 0x29) {
1497                 /* Special case SUBQ as NEGQ.  */
1498                 tcg_gen_neg_i64(vc, vb);
1499                 break;
1500             }
1501         }
1502 
1503         va = load_gpr(ctx, ra);
1504         switch (fn7) {
1505         case 0x00:
1506             /* ADDL */
1507             tcg_gen_add_i64(vc, va, vb);
1508             tcg_gen_ext32s_i64(vc, vc);
1509             break;
1510         case 0x02:
1511             /* S4ADDL */
1512             tmp = tcg_temp_new();
1513             tcg_gen_shli_i64(tmp, va, 2);
1514             tcg_gen_add_i64(tmp, tmp, vb);
1515             tcg_gen_ext32s_i64(vc, tmp);
1516             break;
1517         case 0x09:
1518             /* SUBL */
1519             tcg_gen_sub_i64(vc, va, vb);
1520             tcg_gen_ext32s_i64(vc, vc);
1521             break;
1522         case 0x0B:
1523             /* S4SUBL */
1524             tmp = tcg_temp_new();
1525             tcg_gen_shli_i64(tmp, va, 2);
1526             tcg_gen_sub_i64(tmp, tmp, vb);
1527             tcg_gen_ext32s_i64(vc, tmp);
1528             break;
1529         case 0x0F:
1530             /* CMPBGE */
1531             if (ra == 31) {
1532                 /* Special case 0 >= X as X == 0.  */
1533                 gen_helper_cmpbe0(vc, vb);
1534             } else {
1535                 gen_helper_cmpbge(vc, va, vb);
1536             }
1537             break;
1538         case 0x12:
1539             /* S8ADDL */
1540             tmp = tcg_temp_new();
1541             tcg_gen_shli_i64(tmp, va, 3);
1542             tcg_gen_add_i64(tmp, tmp, vb);
1543             tcg_gen_ext32s_i64(vc, tmp);
1544             break;
1545         case 0x1B:
1546             /* S8SUBL */
1547             tmp = tcg_temp_new();
1548             tcg_gen_shli_i64(tmp, va, 3);
1549             tcg_gen_sub_i64(tmp, tmp, vb);
1550             tcg_gen_ext32s_i64(vc, tmp);
1551             break;
1552         case 0x1D:
1553             /* CMPULT */
1554             tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1555             break;
1556         case 0x20:
1557             /* ADDQ */
1558             tcg_gen_add_i64(vc, va, vb);
1559             break;
1560         case 0x22:
1561             /* S4ADDQ */
1562             tmp = tcg_temp_new();
1563             tcg_gen_shli_i64(tmp, va, 2);
1564             tcg_gen_add_i64(vc, tmp, vb);
1565             break;
1566         case 0x29:
1567             /* SUBQ */
1568             tcg_gen_sub_i64(vc, va, vb);
1569             break;
1570         case 0x2B:
1571             /* S4SUBQ */
1572             tmp = tcg_temp_new();
1573             tcg_gen_shli_i64(tmp, va, 2);
1574             tcg_gen_sub_i64(vc, tmp, vb);
1575             break;
1576         case 0x2D:
1577             /* CMPEQ */
1578             tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1579             break;
1580         case 0x32:
1581             /* S8ADDQ */
1582             tmp = tcg_temp_new();
1583             tcg_gen_shli_i64(tmp, va, 3);
1584             tcg_gen_add_i64(vc, tmp, vb);
1585             break;
1586         case 0x3B:
1587             /* S8SUBQ */
1588             tmp = tcg_temp_new();
1589             tcg_gen_shli_i64(tmp, va, 3);
1590             tcg_gen_sub_i64(vc, tmp, vb);
1591             break;
1592         case 0x3D:
1593             /* CMPULE */
1594             tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1595             break;
1596         case 0x40:
1597             /* ADDL/V */
1598             tmp = tcg_temp_new();
1599             tcg_gen_ext32s_i64(tmp, va);
1600             tcg_gen_ext32s_i64(vc, vb);
1601             tcg_gen_add_i64(tmp, tmp, vc);
1602             tcg_gen_ext32s_i64(vc, tmp);
1603             gen_helper_check_overflow(cpu_env, vc, tmp);
1604             break;
1605         case 0x49:
1606             /* SUBL/V */
1607             tmp = tcg_temp_new();
1608             tcg_gen_ext32s_i64(tmp, va);
1609             tcg_gen_ext32s_i64(vc, vb);
1610             tcg_gen_sub_i64(tmp, tmp, vc);
1611             tcg_gen_ext32s_i64(vc, tmp);
1612             gen_helper_check_overflow(cpu_env, vc, tmp);
1613             break;
1614         case 0x4D:
1615             /* CMPLT */
1616             tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1617             break;
1618         case 0x60:
1619             /* ADDQ/V */
1620             tmp = tcg_temp_new();
1621             tmp2 = tcg_temp_new();
1622             tcg_gen_eqv_i64(tmp, va, vb);
1623             tcg_gen_mov_i64(tmp2, va);
1624             tcg_gen_add_i64(vc, va, vb);
1625             tcg_gen_xor_i64(tmp2, tmp2, vc);
1626             tcg_gen_and_i64(tmp, tmp, tmp2);
1627             tcg_gen_shri_i64(tmp, tmp, 63);
1628             tcg_gen_movi_i64(tmp2, 0);
1629             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1630             break;
1631         case 0x69:
1632             /* SUBQ/V */
1633             tmp = tcg_temp_new();
1634             tmp2 = tcg_temp_new();
1635             tcg_gen_xor_i64(tmp, va, vb);
1636             tcg_gen_mov_i64(tmp2, va);
1637             tcg_gen_sub_i64(vc, va, vb);
1638             tcg_gen_xor_i64(tmp2, tmp2, vc);
1639             tcg_gen_and_i64(tmp, tmp, tmp2);
1640             tcg_gen_shri_i64(tmp, tmp, 63);
1641             tcg_gen_movi_i64(tmp2, 0);
1642             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1643             break;
1644         case 0x6D:
1645             /* CMPLE */
1646             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1647             break;
1648         default:
1649             goto invalid_opc;
1650         }
1651         break;
1652 
1653     case 0x11:
1654         if (fn7 == 0x20) {
1655             if (rc == 31) {
1656                 /* Special case BIS as NOP.  */
1657                 break;
1658             }
1659             if (ra == 31) {
1660                 /* Special case BIS as MOV.  */
1661                 vc = dest_gpr(ctx, rc);
1662                 if (islit) {
1663                     tcg_gen_movi_i64(vc, lit);
1664                 } else {
1665                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1666                 }
1667                 break;
1668             }
1669         }
1670 
1671         vc = dest_gpr(ctx, rc);
1672         vb = load_gpr_lit(ctx, rb, lit, islit);
1673 
1674         if (fn7 == 0x28 && ra == 31) {
1675             /* Special case ORNOT as NOT.  */
1676             tcg_gen_not_i64(vc, vb);
1677             break;
1678         }
1679 
1680         va = load_gpr(ctx, ra);
1681         switch (fn7) {
1682         case 0x00:
1683             /* AND */
1684             tcg_gen_and_i64(vc, va, vb);
1685             break;
1686         case 0x08:
1687             /* BIC */
1688             tcg_gen_andc_i64(vc, va, vb);
1689             break;
1690         case 0x14:
1691             /* CMOVLBS */
1692             tmp = tcg_temp_new();
1693             tcg_gen_andi_i64(tmp, va, 1);
1694             tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1695                                 vb, load_gpr(ctx, rc));
1696             break;
1697         case 0x16:
1698             /* CMOVLBC */
1699             tmp = tcg_temp_new();
1700             tcg_gen_andi_i64(tmp, va, 1);
1701             tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1702                                 vb, load_gpr(ctx, rc));
1703             break;
1704         case 0x20:
1705             /* BIS */
1706             tcg_gen_or_i64(vc, va, vb);
1707             break;
1708         case 0x24:
1709             /* CMOVEQ */
1710             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1711                                 vb, load_gpr(ctx, rc));
1712             break;
1713         case 0x26:
1714             /* CMOVNE */
1715             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1716                                 vb, load_gpr(ctx, rc));
1717             break;
1718         case 0x28:
1719             /* ORNOT */
1720             tcg_gen_orc_i64(vc, va, vb);
1721             break;
1722         case 0x40:
1723             /* XOR */
1724             tcg_gen_xor_i64(vc, va, vb);
1725             break;
1726         case 0x44:
1727             /* CMOVLT */
1728             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1729                                 vb, load_gpr(ctx, rc));
1730             break;
1731         case 0x46:
1732             /* CMOVGE */
1733             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1734                                 vb, load_gpr(ctx, rc));
1735             break;
1736         case 0x48:
1737             /* EQV */
1738             tcg_gen_eqv_i64(vc, va, vb);
1739             break;
1740         case 0x61:
1741             /* AMASK */
1742             REQUIRE_REG_31(ra);
1743             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1744             break;
1745         case 0x64:
1746             /* CMOVLE */
1747             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1748                                 vb, load_gpr(ctx, rc));
1749             break;
1750         case 0x66:
1751             /* CMOVGT */
1752             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1753                                 vb, load_gpr(ctx, rc));
1754             break;
1755         case 0x6C:
1756             /* IMPLVER */
1757             REQUIRE_REG_31(ra);
1758             tcg_gen_movi_i64(vc, ctx->implver);
1759             break;
1760         default:
1761             goto invalid_opc;
1762         }
1763         break;
1764 
1765     case 0x12:
1766         vc = dest_gpr(ctx, rc);
1767         va = load_gpr(ctx, ra);
1768         switch (fn7) {
1769         case 0x02:
1770             /* MSKBL */
1771             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1772             break;
1773         case 0x06:
1774             /* EXTBL */
1775             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1776             break;
1777         case 0x0B:
1778             /* INSBL */
1779             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1780             break;
1781         case 0x12:
1782             /* MSKWL */
1783             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1784             break;
1785         case 0x16:
1786             /* EXTWL */
1787             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1788             break;
1789         case 0x1B:
1790             /* INSWL */
1791             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1792             break;
1793         case 0x22:
1794             /* MSKLL */
1795             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1796             break;
1797         case 0x26:
1798             /* EXTLL */
1799             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1800             break;
1801         case 0x2B:
1802             /* INSLL */
1803             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1804             break;
1805         case 0x30:
1806             /* ZAP */
1807             if (islit) {
1808                 gen_zapnoti(vc, va, ~lit);
1809             } else {
1810                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1811             }
1812             break;
1813         case 0x31:
1814             /* ZAPNOT */
1815             if (islit) {
1816                 gen_zapnoti(vc, va, lit);
1817             } else {
1818                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1819             }
1820             break;
1821         case 0x32:
1822             /* MSKQL */
1823             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1824             break;
1825         case 0x34:
1826             /* SRL */
1827             if (islit) {
1828                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1829             } else {
1830                 tmp = tcg_temp_new();
1831                 vb = load_gpr(ctx, rb);
1832                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1833                 tcg_gen_shr_i64(vc, va, tmp);
1834             }
1835             break;
1836         case 0x36:
1837             /* EXTQL */
1838             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1839             break;
1840         case 0x39:
1841             /* SLL */
1842             if (islit) {
1843                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1844             } else {
1845                 tmp = tcg_temp_new();
1846                 vb = load_gpr(ctx, rb);
1847                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1848                 tcg_gen_shl_i64(vc, va, tmp);
1849             }
1850             break;
1851         case 0x3B:
1852             /* INSQL */
1853             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1854             break;
1855         case 0x3C:
1856             /* SRA */
1857             if (islit) {
1858                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1859             } else {
1860                 tmp = tcg_temp_new();
1861                 vb = load_gpr(ctx, rb);
1862                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1863                 tcg_gen_sar_i64(vc, va, tmp);
1864             }
1865             break;
1866         case 0x52:
1867             /* MSKWH */
1868             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1869             break;
1870         case 0x57:
1871             /* INSWH */
1872             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1873             break;
1874         case 0x5A:
1875             /* EXTWH */
1876             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1877             break;
1878         case 0x62:
1879             /* MSKLH */
1880             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1881             break;
1882         case 0x67:
1883             /* INSLH */
1884             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1885             break;
1886         case 0x6A:
1887             /* EXTLH */
1888             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1889             break;
1890         case 0x72:
1891             /* MSKQH */
1892             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1893             break;
1894         case 0x77:
1895             /* INSQH */
1896             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1897             break;
1898         case 0x7A:
1899             /* EXTQH */
1900             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1901             break;
1902         default:
1903             goto invalid_opc;
1904         }
1905         break;
1906 
1907     case 0x13:
1908         vc = dest_gpr(ctx, rc);
1909         vb = load_gpr_lit(ctx, rb, lit, islit);
1910         va = load_gpr(ctx, ra);
1911         switch (fn7) {
1912         case 0x00:
1913             /* MULL */
1914             tcg_gen_mul_i64(vc, va, vb);
1915             tcg_gen_ext32s_i64(vc, vc);
1916             break;
1917         case 0x20:
1918             /* MULQ */
1919             tcg_gen_mul_i64(vc, va, vb);
1920             break;
1921         case 0x30:
1922             /* UMULH */
1923             tmp = tcg_temp_new();
1924             tcg_gen_mulu2_i64(tmp, vc, va, vb);
1925             break;
1926         case 0x40:
1927             /* MULL/V */
1928             tmp = tcg_temp_new();
1929             tcg_gen_ext32s_i64(tmp, va);
1930             tcg_gen_ext32s_i64(vc, vb);
1931             tcg_gen_mul_i64(tmp, tmp, vc);
1932             tcg_gen_ext32s_i64(vc, tmp);
1933             gen_helper_check_overflow(cpu_env, vc, tmp);
1934             break;
1935         case 0x60:
1936             /* MULQ/V */
1937             tmp = tcg_temp_new();
1938             tmp2 = tcg_temp_new();
1939             tcg_gen_muls2_i64(vc, tmp, va, vb);
1940             tcg_gen_sari_i64(tmp2, vc, 63);
1941             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1942             break;
1943         default:
1944             goto invalid_opc;
1945         }
1946         break;
1947 
1948     case 0x14:
1949         REQUIRE_AMASK(FIX);
1950         vc = dest_fpr(ctx, rc);
1951         switch (fpfn) { /* fn11 & 0x3F */
1952         case 0x04:
1953             /* ITOFS */
1954             REQUIRE_REG_31(rb);
1955             REQUIRE_FEN;
1956             t32 = tcg_temp_new_i32();
1957             va = load_gpr(ctx, ra);
1958             tcg_gen_extrl_i64_i32(t32, va);
1959             gen_helper_memory_to_s(vc, t32);
1960             break;
1961         case 0x0A:
1962             /* SQRTF */
1963             REQUIRE_REG_31(ra);
1964             REQUIRE_FEN;
1965             vb = load_fpr(ctx, rb);
1966             gen_helper_sqrtf(vc, cpu_env, vb);
1967             break;
1968         case 0x0B:
1969             /* SQRTS */
1970             REQUIRE_REG_31(ra);
1971             REQUIRE_FEN;
1972             gen_sqrts(ctx, rb, rc, fn11);
1973             break;
1974         case 0x14:
1975             /* ITOFF */
1976             REQUIRE_REG_31(rb);
1977             REQUIRE_FEN;
1978             t32 = tcg_temp_new_i32();
1979             va = load_gpr(ctx, ra);
1980             tcg_gen_extrl_i64_i32(t32, va);
1981             gen_helper_memory_to_f(vc, t32);
1982             break;
1983         case 0x24:
1984             /* ITOFT */
1985             REQUIRE_REG_31(rb);
1986             REQUIRE_FEN;
1987             va = load_gpr(ctx, ra);
1988             tcg_gen_mov_i64(vc, va);
1989             break;
1990         case 0x2A:
1991             /* SQRTG */
1992             REQUIRE_REG_31(ra);
1993             REQUIRE_FEN;
1994             vb = load_fpr(ctx, rb);
1995             gen_helper_sqrtg(vc, cpu_env, vb);
1996             break;
1997         case 0x02B:
1998             /* SQRTT */
1999             REQUIRE_REG_31(ra);
2000             REQUIRE_FEN;
2001             gen_sqrtt(ctx, rb, rc, fn11);
2002             break;
2003         default:
2004             goto invalid_opc;
2005         }
2006         break;
2007 
2008     case 0x15:
2009         /* VAX floating point */
2010         /* XXX: rounding mode and trap are ignored (!) */
2011         vc = dest_fpr(ctx, rc);
2012         vb = load_fpr(ctx, rb);
2013         va = load_fpr(ctx, ra);
2014         switch (fpfn) { /* fn11 & 0x3F */
2015         case 0x00:
2016             /* ADDF */
2017             REQUIRE_FEN;
2018             gen_helper_addf(vc, cpu_env, va, vb);
2019             break;
2020         case 0x01:
2021             /* SUBF */
2022             REQUIRE_FEN;
2023             gen_helper_subf(vc, cpu_env, va, vb);
2024             break;
2025         case 0x02:
2026             /* MULF */
2027             REQUIRE_FEN;
2028             gen_helper_mulf(vc, cpu_env, va, vb);
2029             break;
2030         case 0x03:
2031             /* DIVF */
2032             REQUIRE_FEN;
2033             gen_helper_divf(vc, cpu_env, va, vb);
2034             break;
2035         case 0x1E:
2036             /* CVTDG -- TODO */
2037             REQUIRE_REG_31(ra);
2038             goto invalid_opc;
2039         case 0x20:
2040             /* ADDG */
2041             REQUIRE_FEN;
2042             gen_helper_addg(vc, cpu_env, va, vb);
2043             break;
2044         case 0x21:
2045             /* SUBG */
2046             REQUIRE_FEN;
2047             gen_helper_subg(vc, cpu_env, va, vb);
2048             break;
2049         case 0x22:
2050             /* MULG */
2051             REQUIRE_FEN;
2052             gen_helper_mulg(vc, cpu_env, va, vb);
2053             break;
2054         case 0x23:
2055             /* DIVG */
2056             REQUIRE_FEN;
2057             gen_helper_divg(vc, cpu_env, va, vb);
2058             break;
2059         case 0x25:
2060             /* CMPGEQ */
2061             REQUIRE_FEN;
2062             gen_helper_cmpgeq(vc, cpu_env, va, vb);
2063             break;
2064         case 0x26:
2065             /* CMPGLT */
2066             REQUIRE_FEN;
2067             gen_helper_cmpglt(vc, cpu_env, va, vb);
2068             break;
2069         case 0x27:
2070             /* CMPGLE */
2071             REQUIRE_FEN;
2072             gen_helper_cmpgle(vc, cpu_env, va, vb);
2073             break;
2074         case 0x2C:
2075             /* CVTGF */
2076             REQUIRE_REG_31(ra);
2077             REQUIRE_FEN;
2078             gen_helper_cvtgf(vc, cpu_env, vb);
2079             break;
2080         case 0x2D:
2081             /* CVTGD -- TODO */
2082             REQUIRE_REG_31(ra);
2083             goto invalid_opc;
2084         case 0x2F:
2085             /* CVTGQ */
2086             REQUIRE_REG_31(ra);
2087             REQUIRE_FEN;
2088             gen_helper_cvtgq(vc, cpu_env, vb);
2089             break;
2090         case 0x3C:
2091             /* CVTQF */
2092             REQUIRE_REG_31(ra);
2093             REQUIRE_FEN;
2094             gen_helper_cvtqf(vc, cpu_env, vb);
2095             break;
2096         case 0x3E:
2097             /* CVTQG */
2098             REQUIRE_REG_31(ra);
2099             REQUIRE_FEN;
2100             gen_helper_cvtqg(vc, cpu_env, vb);
2101             break;
2102         default:
2103             goto invalid_opc;
2104         }
2105         break;
2106 
2107     case 0x16:
2108         /* IEEE floating-point */
2109         switch (fpfn) { /* fn11 & 0x3F */
2110         case 0x00:
2111             /* ADDS */
2112             REQUIRE_FEN;
2113             gen_adds(ctx, ra, rb, rc, fn11);
2114             break;
2115         case 0x01:
2116             /* SUBS */
2117             REQUIRE_FEN;
2118             gen_subs(ctx, ra, rb, rc, fn11);
2119             break;
2120         case 0x02:
2121             /* MULS */
2122             REQUIRE_FEN;
2123             gen_muls(ctx, ra, rb, rc, fn11);
2124             break;
2125         case 0x03:
2126             /* DIVS */
2127             REQUIRE_FEN;
2128             gen_divs(ctx, ra, rb, rc, fn11);
2129             break;
2130         case 0x20:
2131             /* ADDT */
2132             REQUIRE_FEN;
2133             gen_addt(ctx, ra, rb, rc, fn11);
2134             break;
2135         case 0x21:
2136             /* SUBT */
2137             REQUIRE_FEN;
2138             gen_subt(ctx, ra, rb, rc, fn11);
2139             break;
2140         case 0x22:
2141             /* MULT */
2142             REQUIRE_FEN;
2143             gen_mult(ctx, ra, rb, rc, fn11);
2144             break;
2145         case 0x23:
2146             /* DIVT */
2147             REQUIRE_FEN;
2148             gen_divt(ctx, ra, rb, rc, fn11);
2149             break;
2150         case 0x24:
2151             /* CMPTUN */
2152             REQUIRE_FEN;
2153             gen_cmptun(ctx, ra, rb, rc, fn11);
2154             break;
2155         case 0x25:
2156             /* CMPTEQ */
2157             REQUIRE_FEN;
2158             gen_cmpteq(ctx, ra, rb, rc, fn11);
2159             break;
2160         case 0x26:
2161             /* CMPTLT */
2162             REQUIRE_FEN;
2163             gen_cmptlt(ctx, ra, rb, rc, fn11);
2164             break;
2165         case 0x27:
2166             /* CMPTLE */
2167             REQUIRE_FEN;
2168             gen_cmptle(ctx, ra, rb, rc, fn11);
2169             break;
2170         case 0x2C:
2171             REQUIRE_REG_31(ra);
2172             REQUIRE_FEN;
2173             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2174                 /* CVTST */
2175                 gen_cvtst(ctx, rb, rc, fn11);
2176             } else {
2177                 /* CVTTS */
2178                 gen_cvtts(ctx, rb, rc, fn11);
2179             }
2180             break;
2181         case 0x2F:
2182             /* CVTTQ */
2183             REQUIRE_REG_31(ra);
2184             REQUIRE_FEN;
2185             gen_cvttq(ctx, rb, rc, fn11);
2186             break;
2187         case 0x3C:
2188             /* CVTQS */
2189             REQUIRE_REG_31(ra);
2190             REQUIRE_FEN;
2191             gen_cvtqs(ctx, rb, rc, fn11);
2192             break;
2193         case 0x3E:
2194             /* CVTQT */
2195             REQUIRE_REG_31(ra);
2196             REQUIRE_FEN;
2197             gen_cvtqt(ctx, rb, rc, fn11);
2198             break;
2199         default:
2200             goto invalid_opc;
2201         }
2202         break;
2203 
2204     case 0x17:
2205         switch (fn11) {
2206         case 0x010:
2207             /* CVTLQ */
2208             REQUIRE_REG_31(ra);
2209             REQUIRE_FEN;
2210             vc = dest_fpr(ctx, rc);
2211             vb = load_fpr(ctx, rb);
2212             gen_cvtlq(vc, vb);
2213             break;
2214         case 0x020:
2215             /* CPYS */
2216             REQUIRE_FEN;
2217             if (rc == 31) {
2218                 /* Special case CPYS as FNOP.  */
2219             } else {
2220                 vc = dest_fpr(ctx, rc);
2221                 va = load_fpr(ctx, ra);
2222                 if (ra == rb) {
2223                     /* Special case CPYS as FMOV.  */
2224                     tcg_gen_mov_i64(vc, va);
2225                 } else {
2226                     vb = load_fpr(ctx, rb);
2227                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2228                 }
2229             }
2230             break;
2231         case 0x021:
2232             /* CPYSN */
2233             REQUIRE_FEN;
2234             vc = dest_fpr(ctx, rc);
2235             vb = load_fpr(ctx, rb);
2236             va = load_fpr(ctx, ra);
2237             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2238             break;
2239         case 0x022:
2240             /* CPYSE */
2241             REQUIRE_FEN;
2242             vc = dest_fpr(ctx, rc);
2243             vb = load_fpr(ctx, rb);
2244             va = load_fpr(ctx, ra);
2245             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2246             break;
2247         case 0x024:
2248             /* MT_FPCR */
2249             REQUIRE_FEN;
2250             va = load_fpr(ctx, ra);
2251             gen_helper_store_fpcr(cpu_env, va);
2252             if (ctx->tb_rm == QUAL_RM_D) {
2253                 /* Re-do the copy of the rounding mode to fp_status
2254                    the next time we use dynamic rounding.  */
2255                 ctx->tb_rm = -1;
2256             }
2257             break;
2258         case 0x025:
2259             /* MF_FPCR */
2260             REQUIRE_FEN;
2261             va = dest_fpr(ctx, ra);
2262             gen_helper_load_fpcr(va, cpu_env);
2263             break;
2264         case 0x02A:
2265             /* FCMOVEQ */
2266             REQUIRE_FEN;
2267             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2268             break;
2269         case 0x02B:
2270             /* FCMOVNE */
2271             REQUIRE_FEN;
2272             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2273             break;
2274         case 0x02C:
2275             /* FCMOVLT */
2276             REQUIRE_FEN;
2277             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2278             break;
2279         case 0x02D:
2280             /* FCMOVGE */
2281             REQUIRE_FEN;
2282             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2283             break;
2284         case 0x02E:
2285             /* FCMOVLE */
2286             REQUIRE_FEN;
2287             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2288             break;
2289         case 0x02F:
2290             /* FCMOVGT */
2291             REQUIRE_FEN;
2292             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2293             break;
2294         case 0x030: /* CVTQL */
2295         case 0x130: /* CVTQL/V */
2296         case 0x530: /* CVTQL/SV */
2297             REQUIRE_REG_31(ra);
2298             REQUIRE_FEN;
2299             vc = dest_fpr(ctx, rc);
2300             vb = load_fpr(ctx, rb);
2301             gen_helper_cvtql(vc, cpu_env, vb);
2302             gen_fp_exc_raise(rc, fn11);
2303             break;
2304         default:
2305             goto invalid_opc;
2306         }
2307         break;
2308 
2309     case 0x18:
2310         switch ((uint16_t)disp16) {
2311         case 0x0000:
2312             /* TRAPB */
2313             /* No-op.  */
2314             break;
2315         case 0x0400:
2316             /* EXCB */
2317             /* No-op.  */
2318             break;
2319         case 0x4000:
2320             /* MB */
2321             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2322             break;
2323         case 0x4400:
2324             /* WMB */
2325             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2326             break;
2327         case 0x8000:
2328             /* FETCH */
2329             /* No-op */
2330             break;
2331         case 0xA000:
2332             /* FETCH_M */
2333             /* No-op */
2334             break;
2335         case 0xC000:
2336             /* RPCC */
2337             va = dest_gpr(ctx, ra);
2338             if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2339                 gen_io_start();
2340                 gen_helper_load_pcc(va, cpu_env);
2341                 ret = DISAS_PC_STALE;
2342             } else {
2343                 gen_helper_load_pcc(va, cpu_env);
2344             }
2345             break;
2346         case 0xE000:
2347             /* RC */
2348             gen_rx(ctx, ra, 0);
2349             break;
2350         case 0xE800:
2351             /* ECB */
2352             break;
2353         case 0xF000:
2354             /* RS */
2355             gen_rx(ctx, ra, 1);
2356             break;
2357         case 0xF800:
2358             /* WH64 */
2359             /* No-op */
2360             break;
2361         case 0xFC00:
2362             /* WH64EN */
2363             /* No-op */
2364             break;
2365         default:
2366             goto invalid_opc;
2367         }
2368         break;
2369 
2370     case 0x19:
2371         /* HW_MFPR (PALcode) */
2372 #ifndef CONFIG_USER_ONLY
2373         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2374         va = dest_gpr(ctx, ra);
2375         ret = gen_mfpr(ctx, va, insn & 0xffff);
2376         break;
2377 #else
2378         goto invalid_opc;
2379 #endif
2380 
2381     case 0x1A:
2382         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2383            prediction stack action, which of course we don't implement.  */
2384         vb = load_gpr(ctx, rb);
2385         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2386         if (ra != 31) {
2387             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2388         }
2389         ret = DISAS_PC_UPDATED;
2390         break;
2391 
2392     case 0x1B:
2393         /* HW_LD (PALcode) */
2394 #ifndef CONFIG_USER_ONLY
2395         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2396         {
2397             TCGv addr = tcg_temp_new();
2398             vb = load_gpr(ctx, rb);
2399             va = dest_gpr(ctx, ra);
2400 
2401             tcg_gen_addi_i64(addr, vb, disp12);
2402             switch ((insn >> 12) & 0xF) {
2403             case 0x0:
2404                 /* Longword physical access (hw_ldl/p) */
2405                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2406                 break;
2407             case 0x1:
2408                 /* Quadword physical access (hw_ldq/p) */
2409                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2410                 break;
2411             case 0x2:
2412                 /* Longword physical access with lock (hw_ldl_l/p) */
2413                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2414                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2415                 tcg_gen_mov_i64(cpu_lock_value, va);
2416                 break;
2417             case 0x3:
2418                 /* Quadword physical access with lock (hw_ldq_l/p) */
2419                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2420                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2421                 tcg_gen_mov_i64(cpu_lock_value, va);
2422                 break;
2423             case 0x4:
2424                 /* Longword virtual PTE fetch (hw_ldl/v) */
2425                 goto invalid_opc;
2426             case 0x5:
2427                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2428                 goto invalid_opc;
2429                 break;
2430             case 0x6:
2431                 /* Invalid */
2432                 goto invalid_opc;
2433             case 0x7:
                /* Invalid */
2435                 goto invalid_opc;
2436             case 0x8:
2437                 /* Longword virtual access (hw_ldl) */
2438                 goto invalid_opc;
2439             case 0x9:
2440                 /* Quadword virtual access (hw_ldq) */
2441                 goto invalid_opc;
2442             case 0xA:
2443                 /* Longword virtual access with protection check (hw_ldl/w) */
2444                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2445                                     MO_LESL | MO_ALIGN);
2446                 break;
2447             case 0xB:
2448                 /* Quadword virtual access with protection check (hw_ldq/w) */
2449                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2450                                     MO_LEUQ | MO_ALIGN);
2451                 break;
2452             case 0xC:
2453                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2454                 goto invalid_opc;
2455             case 0xD:
2456                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2457                 goto invalid_opc;
2458             case 0xE:
2459                 /* Longword virtual access with alternate access mode and
2460                    protection checks (hw_ldl/wa) */
2461                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2462                                     MO_LESL | MO_ALIGN);
2463                 break;
2464             case 0xF:
2465                 /* Quadword virtual access with alternate access mode and
2466                    protection checks (hw_ldq/wa) */
2467                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2468                                     MO_LEUQ | MO_ALIGN);
2469                 break;
2470             }
2471             break;
2472         }
2473 #else
2474         goto invalid_opc;
2475 #endif
2476 
2477     case 0x1C:
2478         vc = dest_gpr(ctx, rc);
2479         if (fn7 == 0x70) {
2480             /* FTOIT */
2481             REQUIRE_AMASK(FIX);
2482             REQUIRE_REG_31(rb);
2483             va = load_fpr(ctx, ra);
2484             tcg_gen_mov_i64(vc, va);
2485             break;
2486         } else if (fn7 == 0x78) {
2487             /* FTOIS */
2488             REQUIRE_AMASK(FIX);
2489             REQUIRE_REG_31(rb);
2490             t32 = tcg_temp_new_i32();
2491             va = load_fpr(ctx, ra);
2492             gen_helper_s_to_memory(t32, va);
2493             tcg_gen_ext_i32_i64(vc, t32);
2494             break;
2495         }
2496 
2497         vb = load_gpr_lit(ctx, rb, lit, islit);
2498         switch (fn7) {
2499         case 0x00:
2500             /* SEXTB */
2501             REQUIRE_AMASK(BWX);
2502             REQUIRE_REG_31(ra);
2503             tcg_gen_ext8s_i64(vc, vb);
2504             break;
2505         case 0x01:
2506             /* SEXTW */
2507             REQUIRE_AMASK(BWX);
2508             REQUIRE_REG_31(ra);
2509             tcg_gen_ext16s_i64(vc, vb);
2510             break;
2511         case 0x30:
2512             /* CTPOP */
2513             REQUIRE_AMASK(CIX);
2514             REQUIRE_REG_31(ra);
2515             REQUIRE_NO_LIT;
2516             tcg_gen_ctpop_i64(vc, vb);
2517             break;
2518         case 0x31:
2519             /* PERR */
2520             REQUIRE_AMASK(MVI);
2521             REQUIRE_NO_LIT;
2522             va = load_gpr(ctx, ra);
2523             gen_helper_perr(vc, va, vb);
2524             break;
2525         case 0x32:
2526             /* CTLZ */
2527             REQUIRE_AMASK(CIX);
2528             REQUIRE_REG_31(ra);
2529             REQUIRE_NO_LIT;
2530             tcg_gen_clzi_i64(vc, vb, 64);
2531             break;
2532         case 0x33:
2533             /* CTTZ */
2534             REQUIRE_AMASK(CIX);
2535             REQUIRE_REG_31(ra);
2536             REQUIRE_NO_LIT;
2537             tcg_gen_ctzi_i64(vc, vb, 64);
2538             break;
2539         case 0x34:
2540             /* UNPKBW */
2541             REQUIRE_AMASK(MVI);
2542             REQUIRE_REG_31(ra);
2543             REQUIRE_NO_LIT;
2544             gen_helper_unpkbw(vc, vb);
2545             break;
2546         case 0x35:
2547             /* UNPKBL */
2548             REQUIRE_AMASK(MVI);
2549             REQUIRE_REG_31(ra);
2550             REQUIRE_NO_LIT;
2551             gen_helper_unpkbl(vc, vb);
2552             break;
2553         case 0x36:
2554             /* PKWB */
2555             REQUIRE_AMASK(MVI);
2556             REQUIRE_REG_31(ra);
2557             REQUIRE_NO_LIT;
2558             gen_helper_pkwb(vc, vb);
2559             break;
2560         case 0x37:
2561             /* PKLB */
2562             REQUIRE_AMASK(MVI);
2563             REQUIRE_REG_31(ra);
2564             REQUIRE_NO_LIT;
2565             gen_helper_pklb(vc, vb);
2566             break;
2567         case 0x38:
2568             /* MINSB8 */
2569             REQUIRE_AMASK(MVI);
2570             va = load_gpr(ctx, ra);
2571             gen_helper_minsb8(vc, va, vb);
2572             break;
2573         case 0x39:
2574             /* MINSW4 */
2575             REQUIRE_AMASK(MVI);
2576             va = load_gpr(ctx, ra);
2577             gen_helper_minsw4(vc, va, vb);
2578             break;
2579         case 0x3A:
2580             /* MINUB8 */
2581             REQUIRE_AMASK(MVI);
2582             va = load_gpr(ctx, ra);
2583             gen_helper_minub8(vc, va, vb);
2584             break;
2585         case 0x3B:
2586             /* MINUW4 */
2587             REQUIRE_AMASK(MVI);
2588             va = load_gpr(ctx, ra);
2589             gen_helper_minuw4(vc, va, vb);
2590             break;
2591         case 0x3C:
2592             /* MAXUB8 */
2593             REQUIRE_AMASK(MVI);
2594             va = load_gpr(ctx, ra);
2595             gen_helper_maxub8(vc, va, vb);
2596             break;
2597         case 0x3D:
2598             /* MAXUW4 */
2599             REQUIRE_AMASK(MVI);
2600             va = load_gpr(ctx, ra);
2601             gen_helper_maxuw4(vc, va, vb);
2602             break;
2603         case 0x3E:
2604             /* MAXSB8 */
2605             REQUIRE_AMASK(MVI);
2606             va = load_gpr(ctx, ra);
2607             gen_helper_maxsb8(vc, va, vb);
2608             break;
2609         case 0x3F:
2610             /* MAXSW4 */
2611             REQUIRE_AMASK(MVI);
2612             va = load_gpr(ctx, ra);
2613             gen_helper_maxsw4(vc, va, vb);
2614             break;
2615         default:
2616             goto invalid_opc;
2617         }
2618         break;
2619 
2620     case 0x1D:
2621         /* HW_MTPR (PALcode) */
2622 #ifndef CONFIG_USER_ONLY
2623         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2624         vb = load_gpr(ctx, rb);
2625         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2626         break;
2627 #else
2628         goto invalid_opc;
2629 #endif
2630 
2631     case 0x1E:
2632         /* HW_RET (PALcode) */
2633 #ifndef CONFIG_USER_ONLY
2634         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2635         if (rb == 31) {
2636             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2637                address from EXC_ADDR.  This turns out to be useful for our
2638                emulation PALcode, so continue to accept it.  */
2639             vb = dest_sink(ctx);
2640             tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2641         } else {
2642             vb = load_gpr(ctx, rb);
2643         }
2644         tcg_gen_movi_i64(cpu_lock_addr, -1);
2645         st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2646         tmp = tcg_temp_new();
2647         tcg_gen_andi_i64(tmp, vb, 1);
2648         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2649         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2650         /* Allow interrupts to be recognized right away.  */
2651         ret = DISAS_PC_UPDATED_NOCHAIN;
2652         break;
2653 #else
2654         goto invalid_opc;
2655 #endif
2656 
2657     case 0x1F:
2658         /* HW_ST (PALcode) */
2659 #ifndef CONFIG_USER_ONLY
2660         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2661         {
2662             switch ((insn >> 12) & 0xF) {
2663             case 0x0:
2664                 /* Longword physical access */
2665                 va = load_gpr(ctx, ra);
2666                 vb = load_gpr(ctx, rb);
2667                 tmp = tcg_temp_new();
2668                 tcg_gen_addi_i64(tmp, vb, disp12);
2669                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2670                 break;
2671             case 0x1:
2672                 /* Quadword physical access */
2673                 va = load_gpr(ctx, ra);
2674                 vb = load_gpr(ctx, rb);
2675                 tmp = tcg_temp_new();
2676                 tcg_gen_addi_i64(tmp, vb, disp12);
2677                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2678                 break;
2679             case 0x2:
2680                 /* Longword physical access with lock */
2681                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2682                                             MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2683                 break;
2684             case 0x3:
2685                 /* Quadword physical access with lock */
2686                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2687                                             MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2688                 break;
2689             case 0x4:
2690                 /* Longword virtual access */
2691                 goto invalid_opc;
2692             case 0x5:
2693                 /* Quadword virtual access */
2694                 goto invalid_opc;
2695             case 0x6:
2696                 /* Invalid */
2697                 goto invalid_opc;
2698             case 0x7:
2699                 /* Invalid */
2700                 goto invalid_opc;
2701             case 0x8:
2702                 /* Invalid */
2703                 goto invalid_opc;
2704             case 0x9:
2705                 /* Invalid */
2706                 goto invalid_opc;
2707             case 0xA:
2708                 /* Invalid */
2709                 goto invalid_opc;
2710             case 0xB:
2711                 /* Invalid */
2712                 goto invalid_opc;
2713             case 0xC:
2714                 /* Longword virtual access with alternate access mode */
2715                 goto invalid_opc;
2716             case 0xD:
2717                 /* Quadword virtual access with alternate access mode */
2718                 goto invalid_opc;
2719             case 0xE:
2720                 /* Invalid */
2721                 goto invalid_opc;
2722             case 0xF:
2723                 /* Invalid */
2724                 goto invalid_opc;
2725             }
2726             break;
2727         }
2728 #else
2729         goto invalid_opc;
2730 #endif
2731     case 0x20:
2732         /* LDF */
2733         REQUIRE_FEN;
2734         gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
2735         break;
2736     case 0x21:
2737         /* LDG */
2738         REQUIRE_FEN;
2739         gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
2740         break;
2741     case 0x22:
2742         /* LDS */
2743         REQUIRE_FEN;
2744         gen_load_fp(ctx, ra, rb, disp16, gen_lds);
2745         break;
2746     case 0x23:
2747         /* LDT */
2748         REQUIRE_FEN;
2749         gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
2750         break;
2751     case 0x24:
2752         /* STF */
2753         REQUIRE_FEN;
2754         gen_store_fp(ctx, ra, rb, disp16, gen_stf);
2755         break;
2756     case 0x25:
2757         /* STG */
2758         REQUIRE_FEN;
2759         gen_store_fp(ctx, ra, rb, disp16, gen_stg);
2760         break;
2761     case 0x26:
2762         /* STS */
2763         REQUIRE_FEN;
2764         gen_store_fp(ctx, ra, rb, disp16, gen_sts);
2765         break;
2766     case 0x27:
2767         /* STT */
2768         REQUIRE_FEN;
2769         gen_store_fp(ctx, ra, rb, disp16, gen_stt);
2770         break;
2771     case 0x28:
2772         /* LDL */
2773         gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
2774         break;
2775     case 0x29:
2776         /* LDQ */
2777         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
2778         break;
2779     case 0x2A:
2780         /* LDL_L */
2781         gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
2782         break;
2783     case 0x2B:
2784         /* LDQ_L */
2785         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
2786         break;
2787     case 0x2C:
2788         /* STL */
2789         gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
2790         break;
2791     case 0x2D:
2792         /* STQ */
2793         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
2794         break;
2795     case 0x2E:
2796         /* STL_C */
2797         ret = gen_store_conditional(ctx, ra, rb, disp16,
2798                                     ctx->mem_idx, MO_LESL | MO_ALIGN);
2799         break;
2800     case 0x2F:
2801         /* STQ_C */
2802         ret = gen_store_conditional(ctx, ra, rb, disp16,
2803                                     ctx->mem_idx, MO_LEUQ | MO_ALIGN);
2804         break;
2805     case 0x30:
2806         /* BR */
2807         ret = gen_bdirect(ctx, ra, disp21);
2808         break;
2809     case 0x31: /* FBEQ */
2810         REQUIRE_FEN;
2811         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2812         break;
2813     case 0x32: /* FBLT */
2814         REQUIRE_FEN;
2815         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2816         break;
2817     case 0x33: /* FBLE */
2818         REQUIRE_FEN;
2819         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2820         break;
2821     case 0x34:
2822         /* BSR */
2823         ret = gen_bdirect(ctx, ra, disp21);
2824         break;
2825     case 0x35: /* FBNE */
2826         REQUIRE_FEN;
2827         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2828         break;
2829     case 0x36: /* FBGE */
2830         REQUIRE_FEN;
2831         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2832         break;
2833     case 0x37: /* FBGT */
2834         REQUIRE_FEN;
2835         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2836         break;
2837     case 0x38:
2838         /* BLBC */
2839         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2840         break;
2841     case 0x39:
2842         /* BEQ */
2843         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2844         break;
2845     case 0x3A:
2846         /* BLT */
2847         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2848         break;
2849     case 0x3B:
2850         /* BLE */
2851         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2852         break;
2853     case 0x3C:
2854         /* BLBS */
2855         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2856         break;
2857     case 0x3D:
2858         /* BNE */
2859         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2860         break;
2861     case 0x3E:
2862         /* BGE */
2863         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2864         break;
2865     case 0x3F:
2866         /* BGT */
2867         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2868         break;
2869     invalid_opc:
2870         ret = gen_invalid(ctx);
2871         break;
2872     raise_fen:
2873         ret = gen_excp(ctx, EXCP_FEN, 0);
2874         break;
2875     }
2876 
2877     return ret;
2878 }
2879 
2880 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2881 {
2882     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2883     CPUAlphaState *env = cpu->env_ptr;
2884     int64_t bound;
2885 
2886     ctx->tbflags = ctx->base.tb->flags;
2887     ctx->mem_idx = cpu_mmu_index(env, false);
2888     ctx->implver = env->implver;
2889     ctx->amask = env->amask;
2890 
2891 #ifdef CONFIG_USER_ONLY
2892     ctx->ir = cpu_std_ir;
2893     ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
2894 #else
2895     ctx->palbr = env->palbr;
2896     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2897 #endif
2898 
2899     /* ??? Every TB begins with unset rounding mode, to be initialized on
2900        the first fp insn of the TB.  Alternately we could define a proper
2901        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2902        to reset the FP_STATUS to that default at the end of any TB that
2903        changes the default.  We could even (gasp) dynamiclly figure out
2904        what default would be most efficient given the running program.  */
2905     ctx->tb_rm = -1;
2906     /* Similarly for flush-to-zero.  */
2907     ctx->tb_ftz = -1;
2908 
2909     ctx->zero = NULL;
2910     ctx->sink = NULL;
2911 
2912     /* Bound the number of insns to execute to those left on the page.  */
2913     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2914     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2915 }
2916 
/* TranslatorOps.tb_start hook: Alpha needs no per-TB prologue work. */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2920 
/* TranslatorOps.insn_start hook: record the guest PC of the insn about
   to be translated, so exceptions can restore the correct state. */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2925 
2926 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2927 {
2928     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2929     CPUAlphaState *env = cpu->env_ptr;
2930     uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
2931 
2932     ctx->base.pc_next += 4;
2933     ctx->base.is_jmp = translate_one(ctx, insn);
2934 
2935     free_context_temps(ctx);
2936 }
2937 
2938 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
2939 {
2940     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2941 
2942     switch (ctx->base.is_jmp) {
2943     case DISAS_NORETURN:
2944         break;
2945     case DISAS_TOO_MANY:
2946         if (use_goto_tb(ctx, ctx->base.pc_next)) {
2947             tcg_gen_goto_tb(0);
2948             tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2949             tcg_gen_exit_tb(ctx->base.tb, 0);
2950         }
2951         /* FALLTHRU */
2952     case DISAS_PC_STALE:
2953         tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
2954         /* FALLTHRU */
2955     case DISAS_PC_UPDATED:
2956         tcg_gen_lookup_and_goto_ptr();
2957         break;
2958     case DISAS_PC_UPDATED_NOCHAIN:
2959         tcg_gen_exit_tb(NULL, 0);
2960         break;
2961     default:
2962         g_assert_not_reached();
2963     }
2964 }
2965 
/* TranslatorOps.disas_log hook: dump the guest disassembly of this TB
   to the log, headed by the nearest symbol for its start PC. */
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
2972 
/* Callback table handed to the generic translator_loop for Alpha. */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
2981 
/* Target entry point: translate one TB starting at @pc by driving the
   generic translator loop with the Alpha callback table above. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}
2988