xref: /openbmc/qemu/target/alpha/translate.c (revision abaf3e5b)
1 /*
2  *  Alpha emulation cpu translation for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
/* Define ALPHA_DEBUG_DISAS to enable LOG_DISAS tracing through qemu_log.  */
#undef ALPHA_DEBUG_DISAS
/* When set, softfloat status fields are stored inline rather than via
   out-of-line setter helpers (see gen_qual_roundmode/flushzero below).  */
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
45 
/* Per-translation-block disassembly state, extending the generic
   DisasContextBase.  */
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    /* Alignment requirement OR'ed into each memory op (see UNALIGN).  */
    MemOp unalign;
#else
    /* NOTE(review): presumably the PALcode base address register —
       not used within this chunk; confirm against the system-mode code.  */
    uint64_t palbr;
#endif
    /* CPU state flags captured for this TB.  */
    uint32_t tbflags;
    /* MMU index used for all memory accesses generated in this TB.  */
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};
74 
/* In user mode the alignment requirement is per-context (set from TB
   flags); in system mode unaligned accesses always trap (MO_ALIGN).  */
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];     /* integer registers, normal bank */
static TCGv cpu_fir[31];        /* floating-point registers */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;      /* LDx_L lock address; -1 means no lock */
static TCGv cpu_lock_value;     /* value loaded by the last LDx_L */

#ifndef CONFIG_USER_ONLY
/* PALmode bank: same as cpu_std_ir except 8 entries are shadowed.  */
static TCGv cpu_pal_ir[31];
#endif
98 
/* Register the TCG globals backing the fields of CPUAlphaState:
   the 31 integer and 31 floating-point registers, the PALmode shadow
   registers (system mode only), and pc/lock_addr/lock_value.  */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    /* The PALmode bank shares most registers with the normal bank;
       entries r8-r14 and r25 are replaced by the shadow registers.  */
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
162 
163 static TCGv load_zero(DisasContext *ctx)
164 {
165     if (!ctx->zero) {
166         ctx->zero = tcg_constant_i64(0);
167     }
168     return ctx->zero;
169 }
170 
171 static TCGv dest_sink(DisasContext *ctx)
172 {
173     if (!ctx->sink) {
174         ctx->sink = tcg_temp_new();
175     }
176     return ctx->sink;
177 }
178 
179 static void free_context_temps(DisasContext *ctx)
180 {
181     if (ctx->sink) {
182         tcg_gen_discard_i64(ctx->sink);
183         ctx->sink = NULL;
184     }
185 }
186 
187 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
188 {
189     if (likely(reg < 31)) {
190         return ctx->ir[reg];
191     } else {
192         return load_zero(ctx);
193     }
194 }
195 
196 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
197                          uint8_t lit, bool islit)
198 {
199     if (islit) {
200         return tcg_constant_i64(lit);
201     } else if (likely(reg < 31)) {
202         return ctx->ir[reg];
203     } else {
204         return load_zero(ctx);
205     }
206 }
207 
208 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
209 {
210     if (likely(reg < 31)) {
211         return ctx->ir[reg];
212     } else {
213         return dest_sink(ctx);
214     }
215 }
216 
217 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
218 {
219     if (likely(reg < 31)) {
220         return cpu_fir[reg];
221     } else {
222         return load_zero(ctx);
223     }
224 }
225 
226 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
227 {
228     if (likely(reg < 31)) {
229         return cpu_fir[reg];
230     } else {
231         return dest_sink(ctx);
232     }
233 }
234 
235 static int get_flag_ofs(unsigned shift)
236 {
237     int ofs = offsetof(CPUAlphaState, flags);
238 #if HOST_BIG_ENDIAN
239     ofs += 3 - (shift / 8);
240 #else
241     ofs += shift / 8;
242 #endif
243     return ofs;
244 }
245 
/* Load into VAL the flag byte at bit position SHIFT of env->flags.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}
250 
/* Store VAL to the flag byte at bit position SHIFT of env->flags.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
255 
256 static void gen_excp_1(int exception, int error_code)
257 {
258     TCGv_i32 tmp1, tmp2;
259 
260     tmp1 = tcg_constant_i32(exception);
261     tmp2 = tcg_constant_i32(error_code);
262     gen_helper_excp(cpu_env, tmp1, tmp2);
263 }
264 
/* Raise EXCEPTION at the current instruction.  cpu_pc is synchronized
   first so the helper observes the correct PC; the TB then ends.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
271 
/* Raise an illegal-opcode (OPCDEC) exception.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
276 
/* LDF: load a VAX F-format single; the helper expands the 32-bit
   memory representation into the 64-bit register representation.  */
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}
283 
/* LDG: load a VAX G-format double; the helper reorders the 64-bit
   memory representation into the register representation.  */
static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}
290 
/* LDS: load an IEEE S-format single; the helper expands the 32-bit
   memory representation into the 64-bit register representation.  */
static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}
297 
/* LDT: IEEE T-format doubles use the register representation in
   memory, so this is a plain 64-bit little-endian load.  */
static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
302 
/* Common body for FP loads: compute Rb + disp16 and dispatch to the
   format-specific loader FUNC.  */
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops. */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}
313 
/* Common body for integer loads (LDx, LDx_U, LDx_L).
 *   op     - memory operation (size/sign/endianness)
 *   clear  - clear the low 3 address bits (LDQ_U)
 *   locked - load-locked: record the lock address and value
 */
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        /* Locked loads must be naturally aligned; all other loads honor
           the per-context alignment requirement.  */
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        /* Remember address and data for the matching store-conditional.  */
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
342 
343 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
344 {
345     TCGv_i32 tmp32 = tcg_temp_new_i32();
346     gen_helper_f_to_memory(tmp32, addr);
347     tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
348 }
349 
/* STG: store a VAX G-format double; the helper reorders the register
   representation into the 64-bit memory representation.  */
static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
356 
/* STS: store an IEEE S-format single; the helper compresses the 64-bit
   register representation into the 32-bit memory representation.  */
static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}
363 
/* STT: IEEE T-format doubles use the register representation in
   memory, so this is a plain 64-bit little-endian store.  */
static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
368 
/* Common body for FP stores: compute Rb + disp16 and dispatch to the
   format-specific store FUNC.  Stores from $f31 write a zero value
   (via load_fpr).  */
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}
376 
/* Common body for integer stores (STx, STQ_U).
 *   op    - memory operation (size/endianness)
 *   clear - clear the low 3 address bits (STQ_U)
 */
static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    /* Stores from $31 write zero (via load_gpr).  */
    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}
393 
/* STx_C: store-conditional, paired with a previous LDx_L.  Emulated
   with an atomic compare-and-swap: the store succeeds only if the
   computed address equals cpu_lock_addr and memory still holds
   cpu_lock_value.  Ra receives 1 on success, 0 on failure; the lock
   is always invalidated afterward.  */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    /* Address mismatch with the lock: fail immediately.  */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    /* CAS: write Ra only if memory still equals the locked value.  */
    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        /* Success flag: 1 if the CAS observed the locked value.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* Either way, the lock is consumed.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
428 
/* May this TB chain directly to DEST?  Delegates to the generic
   translator policy.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
433 
434 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
435 {
436     uint64_t dest = ctx->base.pc_next + (disp << 2);
437 
438     if (ra != 31) {
439         tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
440     }
441 
442     /* Notice branch-to-next; used to initialize RA with the PC.  */
443     if (disp == 0) {
444         return 0;
445     } else if (use_goto_tb(ctx, dest)) {
446         tcg_gen_goto_tb(0);
447         tcg_gen_movi_i64(cpu_pc, dest);
448         tcg_gen_exit_tb(ctx->base.tb, 0);
449         return DISAS_NORETURN;
450     } else {
451         tcg_gen_movi_i64(cpu_pc, dest);
452         return DISAS_PC_UPDATED;
453     }
454 }
455 
/* Conditional branch to PC + 4 + disp*4, taken when CMP satisfies
   COND against zero.  When chaining is allowed, emit a two-way
   goto_tb; otherwise select the next PC with a movcond.  */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken path: fall through to the next instruction.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken path: chain to the branch destination.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        /* cpu_pc = (cmp COND 0) ? dest : next-pc  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
484 
485 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
486                                int32_t disp, int mask)
487 {
488     if (mask) {
489         TCGv tmp = tcg_temp_new();
490         DisasJumpType ret;
491 
492         tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
493         ret = gen_bcond_internal(ctx, cond, tmp, disp);
494         return ret;
495     }
496     return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
497 }
498 
/* Fold -0.0 for comparison with COND.  Rewrites SRC into DEST so that
   a 64-bit integer comparison against zero yields the IEEE result
   (i.e. -0.0 compares equal to +0.0).  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0. */
        tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
                            src, tcg_constant_i64(0));
        break;

    default:
        /* No other conditions are generated for FP compares.  */
        abort();
    }
}
529 
530 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
531                                 int32_t disp)
532 {
533     TCGv cmp_tmp = tcg_temp_new();
534     DisasJumpType ret;
535 
536     gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
537     ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
538     return ret;
539 }
540 
/* FCMOVxx: move Fb into Fc when Fa (with -0.0 folded) satisfies COND
   against zero; otherwise Fc keeps its current value.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
}
552 
/* Qualifier bits decoded from the fn11 function field of FP operate
   instructions.  */
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
563 
/* Set the softfloat rounding mode required by the instruction's RM
   qualifier, caching the last value in ctx->tb_rm so repeated ops in
   the same TB do not re-emit the store.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    /* The switch covers all four possible values of fn11 & QUAL_RM_MASK.  */
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
601 
/* Set the softfloat flush-to-zero flag according to the instruction's
   underflow qualifier, caching the last value in ctx->tb_ftz.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
629 
/* Return the value of FP input register REG, emitting the input
   validation required by the /S (software completion) qualifier.
   IS_CMP selects the (laxer) validation used for compares.  */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            /* Without software completion, invalid inputs trap.  */
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
655 
/* Raise any accumulated FP exceptions for the operation that wrote
   register RC, masking out those the qualifiers ask us to ignore.  */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
687 
/* CVTLQ: convert the longword held in FP-register layout (VB) to a
   sign-extended quadword in VC.  */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
698 
/* Common body for two-operand IEEE operations: apply the rounding and
   flush-to-zero qualifiers, validate the input, run HELPER, and raise
   any resulting exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
713 
/* Expand a gen_<name> wrapper around gen_ieee_arith2 for each
   two-operand IEEE instruction.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
724 
/* CVTTQ: convert T-floating to quadword integer.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
743 
744 static void gen_ieee_intcvt(DisasContext *ctx,
745                             void (*helper)(TCGv, TCGv_ptr, TCGv),
746                             int rb, int rc, int fn11)
747 {
748     TCGv vb, vc;
749 
750     gen_qual_roundmode(ctx, fn11);
751     vb = load_fpr(ctx, rb);
752     vc = dest_fpr(ctx, rc);
753 
754     /* The only exception that can be raised by integer conversion
755        is inexact.  Thus we only need to worry about exceptions when
756        inexact handling is requested.  */
757     if (fn11 & QUAL_I) {
758         helper(vc, cpu_env, vb);
759         gen_fp_exc_raise(rc, fn11);
760     } else {
761         helper(vc, cpu_env, vb);
762     }
763 }
764 
/* Expand a gen_<name> wrapper around gen_ieee_intcvt for each
   integer-to-float conversion instruction.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
773 
774 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
775 {
776     TCGv vmask = tcg_constant_i64(mask);
777     TCGv tmp = tcg_temp_new_i64();
778 
779     if (inv_a) {
780         tcg_gen_andc_i64(tmp, vmask, va);
781     } else {
782         tcg_gen_and_i64(tmp, va, vmask);
783     }
784 
785     tcg_gen_andc_i64(vc, vb, vmask);
786     tcg_gen_or_i64(vc, vc, tmp);
787 }
788 
/* Common body for three-operand IEEE arithmetic: apply rounding and
   flush-to-zero qualifiers, validate both inputs, run HELPER, and
   raise any resulting exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
805 
/* Expand a gen_<name> wrapper around gen_ieee_arith3 for each
   three-operand IEEE arithmetic instruction.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
820 
/* Common body for IEEE compares: validate both inputs (with the
   compare-specific checks), run HELPER, and raise any resulting
   exceptions.  No rounding qualifiers apply to compares.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
834 
/* Expand a gen_<name> wrapper around gen_ieee_compare for each
   T-floating compare instruction.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
845 
/* Expand the 8-bit ZAPNOT byte selector LIT into a 64-bit mask:
   each set bit i of LIT selects byte i of the result.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;

    for (int i = 0; i < 8; ++i) {
        if (lit & (1u << i)) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
858 
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    /* Special-case the masks that map to plain moves/extensions.  */
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
885 
/* EXTWH, EXTLH, EXTQH: extract the high part of a datum spanning an
   unaligned boundary — shift VA left by (64 - 8*(rb & 7)) & 63 bits,
   then apply the byte mask.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            /* Shift moves everything out of the masked field: zero.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count = -(rb * 8) mod 64.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
907 
/* EXTBL, EXTWL, EXTLL, EXTQL: extract the low part — shift VA right
   by 8*(rb & 7) bits, then apply the byte mask.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        /* Clamp so that pos + len stays within 64 bits.  */
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
927 
/* INSWH, INSLH, INSQH: produce the high part of a datum inserted at
   an unaligned position — the masked VA shifted right across the
   quadword boundary.  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* Nothing crosses into the high quadword: zero.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
963 
/* INSBL, INSWL, INSLL, INSQL: produce the low part of a datum
   inserted at an unaligned position — the masked VA shifted left by
   8*(rb & 7) bits.  */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        /* Clamp so that pos + len stays within 64 bits.  */
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
990 
991 /* MSKWH, MSKLH, MSKQH */
992 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
993                       uint8_t lit, uint8_t byte_mask)
994 {
995     if (islit) {
996         gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
997     } else {
998         TCGv shift = tcg_temp_new();
999         TCGv mask = tcg_temp_new();
1000 
1001         /* The instruction description is as above, where the byte_mask
1002            is shifted left, and then we extract bits <15:8>.  This can be
1003            emulated with a right-shift on the expanded byte mask.  This
1004            requires extra care because for an input <2:0> == 0 we need a
1005            shift of 64 bits in order to generate a zero.  This is done by
1006            splitting the shift into two parts, the variable shift - 1
1007            followed by a constant 1 shift.  The code we expand below is
1008            equivalent to ~(B * 8) & 63.  */
1009 
1010         tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1011         tcg_gen_not_i64(shift, shift);
1012         tcg_gen_andi_i64(shift, shift, 0x3f);
1013         tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1014         tcg_gen_shr_i64(mask, mask, shift);
1015         tcg_gen_shri_i64(mask, mask, 1);
1016 
1017         tcg_gen_andc_i64(vc, va, mask);
1018     }
1019 }
1020 
1021 /* MSKBL, MSKWL, MSKLL, MSKQL */
1022 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1023                       uint8_t lit, uint8_t byte_mask)
1024 {
1025     if (islit) {
1026         gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1027     } else {
1028         TCGv shift = tcg_temp_new();
1029         TCGv mask = tcg_temp_new();
1030 
1031         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1032         tcg_gen_shli_i64(shift, shift, 3);
1033         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1034         tcg_gen_shl_i64(mask, mask, shift);
1035 
1036         tcg_gen_andc_i64(vc, va, mask);
1037     }
1038 }
1039 
/* Read the RX flag byte into Ra (unless Ra is the zero register, r31)
   and then unconditionally store SET as the new flag value.
   NOTE(review): presumably this implements the RS/RC PALcode
   pseudo-ops (set = 1 / set = 0) -- confirm against the caller.  */
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        /* Return the previous flag value to the guest.  */
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    /* The store happens even when the old value is discarded.  */
    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
1048 
/*
 * Translate a CALL_PAL instruction.  PAL calls that only touch internal
 * CPU state are expanded inline; anything else raises EXCP_CALL_PAL in
 * user emulation, or jumps into the PALcode image at ctx->palbr in
 * system emulation.  Returns the disas status for the TB.
 */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            /* Not handled inline: dispatch to the PALcode image with
               the code reduced to the 0x80..0xBF range.  */
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            /* The cpu_index lives in the CPUState that precedes env
               inside AlphaCPU, hence the negative offset.  */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index))
;
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            /* Not handled inline: dispatch to the PALcode image with
               the code reduced to the 0x00..0x3F range.  */
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    /* Unknown or privilege-violating PAL call.  */
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* If already in PAL mode, record that fact in bit 0 of the
           saved exception address; otherwise enter PAL mode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        /* Save the return address for the PALcode's HW_RET.  */
        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        /* Unprivileged entry points start at palbr+0x2000, privileged
           ones at palbr+0x1000, each slot being 64 bytes.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
1189 
1190 #ifndef CONFIG_USER_ONLY
1191 
1192 #define PR_LONG         0x200000
1193 
/*
 * Map a processor-register number to the byte offset of its backing
 * field within CPUAlphaState.  The PR_LONG flag is or'ed in for
 * 32-bit fields.  Returns 0 for registers with no backing storage;
 * callers treat those as read-zero/write-ignore.
 */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        /* PALcode scratch registers.  */
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        /* ALARM expiration time.  */
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1217 
/*
 * Read processor register REGNO into VA.  Registers with dedicated
 * handling (shadow GPRs, timers, PS, FEN) are special-cased; the rest
 * go through the cpu_pr_data offset table.  Returns the disas status,
 * since reading the timer registers may require ending the TB.
 */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        /* If an I/O boundary was started, the TB must not chain
           onward; signal that with DISAS_PC_STALE.  */
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            /* 32-bit field: sign-extend into the 64-bit register.  */
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
1267 
/*
 * Write VB to processor register REGNO.  TLB control, WAIT/HALT,
 * ALARM, PALBR, shadow GPRs, PS and FEN are special-cased; the rest
 * go through the cpu_pr_data offset table.  Returns the disas status,
 * since several registers force the TB to end.
 */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Halt the vCPU and raise EXCP_HALTED to leave the cpu loop.  */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        /* Touches the timer; don't chain the TB if an I/O boundary
           was started.  */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                /* 32-bit field: store only the low half.  */
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
1341 #endif /* !USER_ONLY*/
1342 
/* Reject the insn if the raw literal bit was set in the encoding
   (real_islit, before the rb == 31 special case is folded in).  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the insn if the CPU lacks the given AMASK feature.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the insn unless the given TB flag is set.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the insn unless the given register field is r31/f31.  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Raise a FEN fault if floating-point is currently disabled.  */
#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
1377 
1378 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1379 {
1380     int32_t disp21, disp16, disp12 __attribute__((unused));
1381     uint16_t fn11;
1382     uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1383     bool islit, real_islit;
1384     TCGv va, vb, vc, tmp, tmp2;
1385     TCGv_i32 t32;
1386     DisasJumpType ret;
1387 
1388     /* Decode all instruction fields */
1389     opc = extract32(insn, 26, 6);
1390     ra = extract32(insn, 21, 5);
1391     rb = extract32(insn, 16, 5);
1392     rc = extract32(insn, 0, 5);
1393     real_islit = islit = extract32(insn, 12, 1);
1394     lit = extract32(insn, 13, 8);
1395 
1396     disp21 = sextract32(insn, 0, 21);
1397     disp16 = sextract32(insn, 0, 16);
1398     disp12 = sextract32(insn, 0, 12);
1399 
1400     fn11 = extract32(insn, 5, 11);
1401     fpfn = extract32(insn, 5, 6);
1402     fn7 = extract32(insn, 5, 7);
1403 
1404     if (rb == 31 && !islit) {
1405         islit = true;
1406         lit = 0;
1407     }
1408 
1409     ret = DISAS_NEXT;
1410     switch (opc) {
1411     case 0x00:
1412         /* CALL_PAL */
1413         ret = gen_call_pal(ctx, insn & 0x03ffffff);
1414         break;
1415     case 0x01:
1416         /* OPC01 */
1417         goto invalid_opc;
1418     case 0x02:
1419         /* OPC02 */
1420         goto invalid_opc;
1421     case 0x03:
1422         /* OPC03 */
1423         goto invalid_opc;
1424     case 0x04:
1425         /* OPC04 */
1426         goto invalid_opc;
1427     case 0x05:
1428         /* OPC05 */
1429         goto invalid_opc;
1430     case 0x06:
1431         /* OPC06 */
1432         goto invalid_opc;
1433     case 0x07:
1434         /* OPC07 */
1435         goto invalid_opc;
1436 
1437     case 0x09:
1438         /* LDAH */
1439         disp16 = (uint32_t)disp16 << 16;
1440         /* fall through */
1441     case 0x08:
1442         /* LDA */
1443         va = dest_gpr(ctx, ra);
1444         /* It's worth special-casing immediate loads.  */
1445         if (rb == 31) {
1446             tcg_gen_movi_i64(va, disp16);
1447         } else {
1448             tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1449         }
1450         break;
1451 
1452     case 0x0A:
1453         /* LDBU */
1454         REQUIRE_AMASK(BWX);
1455         gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
1456         break;
1457     case 0x0B:
1458         /* LDQ_U */
1459         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
1460         break;
1461     case 0x0C:
1462         /* LDWU */
1463         REQUIRE_AMASK(BWX);
1464         gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
1465         break;
1466     case 0x0D:
1467         /* STW */
1468         REQUIRE_AMASK(BWX);
1469         gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
1470         break;
1471     case 0x0E:
1472         /* STB */
1473         REQUIRE_AMASK(BWX);
1474         gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
1475         break;
1476     case 0x0F:
1477         /* STQ_U */
1478         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
1479         break;
1480 
1481     case 0x10:
1482         vc = dest_gpr(ctx, rc);
1483         vb = load_gpr_lit(ctx, rb, lit, islit);
1484 
1485         if (ra == 31) {
1486             if (fn7 == 0x00) {
1487                 /* Special case ADDL as SEXTL.  */
1488                 tcg_gen_ext32s_i64(vc, vb);
1489                 break;
1490             }
1491             if (fn7 == 0x29) {
1492                 /* Special case SUBQ as NEGQ.  */
1493                 tcg_gen_neg_i64(vc, vb);
1494                 break;
1495             }
1496         }
1497 
1498         va = load_gpr(ctx, ra);
1499         switch (fn7) {
1500         case 0x00:
1501             /* ADDL */
1502             tcg_gen_add_i64(vc, va, vb);
1503             tcg_gen_ext32s_i64(vc, vc);
1504             break;
1505         case 0x02:
1506             /* S4ADDL */
1507             tmp = tcg_temp_new();
1508             tcg_gen_shli_i64(tmp, va, 2);
1509             tcg_gen_add_i64(tmp, tmp, vb);
1510             tcg_gen_ext32s_i64(vc, tmp);
1511             break;
1512         case 0x09:
1513             /* SUBL */
1514             tcg_gen_sub_i64(vc, va, vb);
1515             tcg_gen_ext32s_i64(vc, vc);
1516             break;
1517         case 0x0B:
1518             /* S4SUBL */
1519             tmp = tcg_temp_new();
1520             tcg_gen_shli_i64(tmp, va, 2);
1521             tcg_gen_sub_i64(tmp, tmp, vb);
1522             tcg_gen_ext32s_i64(vc, tmp);
1523             break;
1524         case 0x0F:
1525             /* CMPBGE */
1526             if (ra == 31) {
1527                 /* Special case 0 >= X as X == 0.  */
1528                 gen_helper_cmpbe0(vc, vb);
1529             } else {
1530                 gen_helper_cmpbge(vc, va, vb);
1531             }
1532             break;
1533         case 0x12:
1534             /* S8ADDL */
1535             tmp = tcg_temp_new();
1536             tcg_gen_shli_i64(tmp, va, 3);
1537             tcg_gen_add_i64(tmp, tmp, vb);
1538             tcg_gen_ext32s_i64(vc, tmp);
1539             break;
1540         case 0x1B:
1541             /* S8SUBL */
1542             tmp = tcg_temp_new();
1543             tcg_gen_shli_i64(tmp, va, 3);
1544             tcg_gen_sub_i64(tmp, tmp, vb);
1545             tcg_gen_ext32s_i64(vc, tmp);
1546             break;
1547         case 0x1D:
1548             /* CMPULT */
1549             tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1550             break;
1551         case 0x20:
1552             /* ADDQ */
1553             tcg_gen_add_i64(vc, va, vb);
1554             break;
1555         case 0x22:
1556             /* S4ADDQ */
1557             tmp = tcg_temp_new();
1558             tcg_gen_shli_i64(tmp, va, 2);
1559             tcg_gen_add_i64(vc, tmp, vb);
1560             break;
1561         case 0x29:
1562             /* SUBQ */
1563             tcg_gen_sub_i64(vc, va, vb);
1564             break;
1565         case 0x2B:
1566             /* S4SUBQ */
1567             tmp = tcg_temp_new();
1568             tcg_gen_shli_i64(tmp, va, 2);
1569             tcg_gen_sub_i64(vc, tmp, vb);
1570             break;
1571         case 0x2D:
1572             /* CMPEQ */
1573             tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1574             break;
1575         case 0x32:
1576             /* S8ADDQ */
1577             tmp = tcg_temp_new();
1578             tcg_gen_shli_i64(tmp, va, 3);
1579             tcg_gen_add_i64(vc, tmp, vb);
1580             break;
1581         case 0x3B:
1582             /* S8SUBQ */
1583             tmp = tcg_temp_new();
1584             tcg_gen_shli_i64(tmp, va, 3);
1585             tcg_gen_sub_i64(vc, tmp, vb);
1586             break;
1587         case 0x3D:
1588             /* CMPULE */
1589             tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1590             break;
1591         case 0x40:
1592             /* ADDL/V */
1593             tmp = tcg_temp_new();
1594             tcg_gen_ext32s_i64(tmp, va);
1595             tcg_gen_ext32s_i64(vc, vb);
1596             tcg_gen_add_i64(tmp, tmp, vc);
1597             tcg_gen_ext32s_i64(vc, tmp);
1598             gen_helper_check_overflow(cpu_env, vc, tmp);
1599             break;
1600         case 0x49:
1601             /* SUBL/V */
1602             tmp = tcg_temp_new();
1603             tcg_gen_ext32s_i64(tmp, va);
1604             tcg_gen_ext32s_i64(vc, vb);
1605             tcg_gen_sub_i64(tmp, tmp, vc);
1606             tcg_gen_ext32s_i64(vc, tmp);
1607             gen_helper_check_overflow(cpu_env, vc, tmp);
1608             break;
1609         case 0x4D:
1610             /* CMPLT */
1611             tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1612             break;
1613         case 0x60:
1614             /* ADDQ/V */
1615             tmp = tcg_temp_new();
1616             tmp2 = tcg_temp_new();
1617             tcg_gen_eqv_i64(tmp, va, vb);
1618             tcg_gen_mov_i64(tmp2, va);
1619             tcg_gen_add_i64(vc, va, vb);
1620             tcg_gen_xor_i64(tmp2, tmp2, vc);
1621             tcg_gen_and_i64(tmp, tmp, tmp2);
1622             tcg_gen_shri_i64(tmp, tmp, 63);
1623             tcg_gen_movi_i64(tmp2, 0);
1624             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1625             break;
1626         case 0x69:
1627             /* SUBQ/V */
1628             tmp = tcg_temp_new();
1629             tmp2 = tcg_temp_new();
1630             tcg_gen_xor_i64(tmp, va, vb);
1631             tcg_gen_mov_i64(tmp2, va);
1632             tcg_gen_sub_i64(vc, va, vb);
1633             tcg_gen_xor_i64(tmp2, tmp2, vc);
1634             tcg_gen_and_i64(tmp, tmp, tmp2);
1635             tcg_gen_shri_i64(tmp, tmp, 63);
1636             tcg_gen_movi_i64(tmp2, 0);
1637             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1638             break;
1639         case 0x6D:
1640             /* CMPLE */
1641             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1642             break;
1643         default:
1644             goto invalid_opc;
1645         }
1646         break;
1647 
1648     case 0x11:
1649         if (fn7 == 0x20) {
1650             if (rc == 31) {
1651                 /* Special case BIS as NOP.  */
1652                 break;
1653             }
1654             if (ra == 31) {
1655                 /* Special case BIS as MOV.  */
1656                 vc = dest_gpr(ctx, rc);
1657                 if (islit) {
1658                     tcg_gen_movi_i64(vc, lit);
1659                 } else {
1660                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1661                 }
1662                 break;
1663             }
1664         }
1665 
1666         vc = dest_gpr(ctx, rc);
1667         vb = load_gpr_lit(ctx, rb, lit, islit);
1668 
1669         if (fn7 == 0x28 && ra == 31) {
1670             /* Special case ORNOT as NOT.  */
1671             tcg_gen_not_i64(vc, vb);
1672             break;
1673         }
1674 
1675         va = load_gpr(ctx, ra);
1676         switch (fn7) {
1677         case 0x00:
1678             /* AND */
1679             tcg_gen_and_i64(vc, va, vb);
1680             break;
1681         case 0x08:
1682             /* BIC */
1683             tcg_gen_andc_i64(vc, va, vb);
1684             break;
1685         case 0x14:
1686             /* CMOVLBS */
1687             tmp = tcg_temp_new();
1688             tcg_gen_andi_i64(tmp, va, 1);
1689             tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1690                                 vb, load_gpr(ctx, rc));
1691             break;
1692         case 0x16:
1693             /* CMOVLBC */
1694             tmp = tcg_temp_new();
1695             tcg_gen_andi_i64(tmp, va, 1);
1696             tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1697                                 vb, load_gpr(ctx, rc));
1698             break;
1699         case 0x20:
1700             /* BIS */
1701             tcg_gen_or_i64(vc, va, vb);
1702             break;
1703         case 0x24:
1704             /* CMOVEQ */
1705             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1706                                 vb, load_gpr(ctx, rc));
1707             break;
1708         case 0x26:
1709             /* CMOVNE */
1710             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1711                                 vb, load_gpr(ctx, rc));
1712             break;
1713         case 0x28:
1714             /* ORNOT */
1715             tcg_gen_orc_i64(vc, va, vb);
1716             break;
1717         case 0x40:
1718             /* XOR */
1719             tcg_gen_xor_i64(vc, va, vb);
1720             break;
1721         case 0x44:
1722             /* CMOVLT */
1723             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1724                                 vb, load_gpr(ctx, rc));
1725             break;
1726         case 0x46:
1727             /* CMOVGE */
1728             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1729                                 vb, load_gpr(ctx, rc));
1730             break;
1731         case 0x48:
1732             /* EQV */
1733             tcg_gen_eqv_i64(vc, va, vb);
1734             break;
1735         case 0x61:
1736             /* AMASK */
1737             REQUIRE_REG_31(ra);
1738             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1739             break;
1740         case 0x64:
1741             /* CMOVLE */
1742             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1743                                 vb, load_gpr(ctx, rc));
1744             break;
1745         case 0x66:
1746             /* CMOVGT */
1747             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1748                                 vb, load_gpr(ctx, rc));
1749             break;
1750         case 0x6C:
1751             /* IMPLVER */
1752             REQUIRE_REG_31(ra);
1753             tcg_gen_movi_i64(vc, ctx->implver);
1754             break;
1755         default:
1756             goto invalid_opc;
1757         }
1758         break;
1759 
1760     case 0x12:
1761         vc = dest_gpr(ctx, rc);
1762         va = load_gpr(ctx, ra);
1763         switch (fn7) {
1764         case 0x02:
1765             /* MSKBL */
1766             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1767             break;
1768         case 0x06:
1769             /* EXTBL */
1770             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1771             break;
1772         case 0x0B:
1773             /* INSBL */
1774             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1775             break;
1776         case 0x12:
1777             /* MSKWL */
1778             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1779             break;
1780         case 0x16:
1781             /* EXTWL */
1782             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1783             break;
1784         case 0x1B:
1785             /* INSWL */
1786             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1787             break;
1788         case 0x22:
1789             /* MSKLL */
1790             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1791             break;
1792         case 0x26:
1793             /* EXTLL */
1794             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1795             break;
1796         case 0x2B:
1797             /* INSLL */
1798             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1799             break;
1800         case 0x30:
1801             /* ZAP */
1802             if (islit) {
1803                 gen_zapnoti(vc, va, ~lit);
1804             } else {
1805                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1806             }
1807             break;
1808         case 0x31:
1809             /* ZAPNOT */
1810             if (islit) {
1811                 gen_zapnoti(vc, va, lit);
1812             } else {
1813                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1814             }
1815             break;
1816         case 0x32:
1817             /* MSKQL */
1818             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1819             break;
1820         case 0x34:
1821             /* SRL */
1822             if (islit) {
1823                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1824             } else {
1825                 tmp = tcg_temp_new();
1826                 vb = load_gpr(ctx, rb);
1827                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1828                 tcg_gen_shr_i64(vc, va, tmp);
1829             }
1830             break;
1831         case 0x36:
1832             /* EXTQL */
1833             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1834             break;
1835         case 0x39:
1836             /* SLL */
1837             if (islit) {
1838                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1839             } else {
1840                 tmp = tcg_temp_new();
1841                 vb = load_gpr(ctx, rb);
1842                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1843                 tcg_gen_shl_i64(vc, va, tmp);
1844             }
1845             break;
1846         case 0x3B:
1847             /* INSQL */
1848             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1849             break;
1850         case 0x3C:
1851             /* SRA */
1852             if (islit) {
1853                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1854             } else {
1855                 tmp = tcg_temp_new();
1856                 vb = load_gpr(ctx, rb);
1857                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1858                 tcg_gen_sar_i64(vc, va, tmp);
1859             }
1860             break;
1861         case 0x52:
1862             /* MSKWH */
1863             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1864             break;
1865         case 0x57:
1866             /* INSWH */
1867             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1868             break;
1869         case 0x5A:
1870             /* EXTWH */
1871             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1872             break;
1873         case 0x62:
1874             /* MSKLH */
1875             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1876             break;
1877         case 0x67:
1878             /* INSLH */
1879             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1880             break;
1881         case 0x6A:
1882             /* EXTLH */
1883             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1884             break;
1885         case 0x72:
1886             /* MSKQH */
1887             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1888             break;
1889         case 0x77:
1890             /* INSQH */
1891             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1892             break;
1893         case 0x7A:
1894             /* EXTQH */
1895             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1896             break;
1897         default:
1898             goto invalid_opc;
1899         }
1900         break;
1901 
1902     case 0x13:
1903         vc = dest_gpr(ctx, rc);
1904         vb = load_gpr_lit(ctx, rb, lit, islit);
1905         va = load_gpr(ctx, ra);
1906         switch (fn7) {
1907         case 0x00:
1908             /* MULL */
1909             tcg_gen_mul_i64(vc, va, vb);
1910             tcg_gen_ext32s_i64(vc, vc);
1911             break;
1912         case 0x20:
1913             /* MULQ */
1914             tcg_gen_mul_i64(vc, va, vb);
1915             break;
1916         case 0x30:
1917             /* UMULH */
1918             tmp = tcg_temp_new();
1919             tcg_gen_mulu2_i64(tmp, vc, va, vb);
1920             break;
1921         case 0x40:
1922             /* MULL/V */
1923             tmp = tcg_temp_new();
1924             tcg_gen_ext32s_i64(tmp, va);
1925             tcg_gen_ext32s_i64(vc, vb);
1926             tcg_gen_mul_i64(tmp, tmp, vc);
1927             tcg_gen_ext32s_i64(vc, tmp);
1928             gen_helper_check_overflow(cpu_env, vc, tmp);
1929             break;
1930         case 0x60:
1931             /* MULQ/V */
1932             tmp = tcg_temp_new();
1933             tmp2 = tcg_temp_new();
1934             tcg_gen_muls2_i64(vc, tmp, va, vb);
1935             tcg_gen_sari_i64(tmp2, vc, 63);
1936             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1937             break;
1938         default:
1939             goto invalid_opc;
1940         }
1941         break;
1942 
1943     case 0x14:
1944         REQUIRE_AMASK(FIX);
1945         vc = dest_fpr(ctx, rc);
1946         switch (fpfn) { /* fn11 & 0x3F */
1947         case 0x04:
1948             /* ITOFS */
1949             REQUIRE_REG_31(rb);
1950             REQUIRE_FEN;
1951             t32 = tcg_temp_new_i32();
1952             va = load_gpr(ctx, ra);
1953             tcg_gen_extrl_i64_i32(t32, va);
1954             gen_helper_memory_to_s(vc, t32);
1955             break;
1956         case 0x0A:
1957             /* SQRTF */
1958             REQUIRE_REG_31(ra);
1959             REQUIRE_FEN;
1960             vb = load_fpr(ctx, rb);
1961             gen_helper_sqrtf(vc, cpu_env, vb);
1962             break;
1963         case 0x0B:
1964             /* SQRTS */
1965             REQUIRE_REG_31(ra);
1966             REQUIRE_FEN;
1967             gen_sqrts(ctx, rb, rc, fn11);
1968             break;
1969         case 0x14:
1970             /* ITOFF */
1971             REQUIRE_REG_31(rb);
1972             REQUIRE_FEN;
1973             t32 = tcg_temp_new_i32();
1974             va = load_gpr(ctx, ra);
1975             tcg_gen_extrl_i64_i32(t32, va);
1976             gen_helper_memory_to_f(vc, t32);
1977             break;
1978         case 0x24:
1979             /* ITOFT */
1980             REQUIRE_REG_31(rb);
1981             REQUIRE_FEN;
1982             va = load_gpr(ctx, ra);
1983             tcg_gen_mov_i64(vc, va);
1984             break;
1985         case 0x2A:
1986             /* SQRTG */
1987             REQUIRE_REG_31(ra);
1988             REQUIRE_FEN;
1989             vb = load_fpr(ctx, rb);
1990             gen_helper_sqrtg(vc, cpu_env, vb);
1991             break;
1992         case 0x02B:
1993             /* SQRTT */
1994             REQUIRE_REG_31(ra);
1995             REQUIRE_FEN;
1996             gen_sqrtt(ctx, rb, rc, fn11);
1997             break;
1998         default:
1999             goto invalid_opc;
2000         }
2001         break;
2002 
2003     case 0x15:
2004         /* VAX floating point */
2005         /* XXX: rounding mode and trap are ignored (!) */
2006         vc = dest_fpr(ctx, rc);
2007         vb = load_fpr(ctx, rb);
2008         va = load_fpr(ctx, ra);
2009         switch (fpfn) { /* fn11 & 0x3F */
2010         case 0x00:
2011             /* ADDF */
2012             REQUIRE_FEN;
2013             gen_helper_addf(vc, cpu_env, va, vb);
2014             break;
2015         case 0x01:
2016             /* SUBF */
2017             REQUIRE_FEN;
2018             gen_helper_subf(vc, cpu_env, va, vb);
2019             break;
2020         case 0x02:
2021             /* MULF */
2022             REQUIRE_FEN;
2023             gen_helper_mulf(vc, cpu_env, va, vb);
2024             break;
2025         case 0x03:
2026             /* DIVF */
2027             REQUIRE_FEN;
2028             gen_helper_divf(vc, cpu_env, va, vb);
2029             break;
2030         case 0x1E:
2031             /* CVTDG -- TODO */
2032             REQUIRE_REG_31(ra);
2033             goto invalid_opc;
2034         case 0x20:
2035             /* ADDG */
2036             REQUIRE_FEN;
2037             gen_helper_addg(vc, cpu_env, va, vb);
2038             break;
2039         case 0x21:
2040             /* SUBG */
2041             REQUIRE_FEN;
2042             gen_helper_subg(vc, cpu_env, va, vb);
2043             break;
2044         case 0x22:
2045             /* MULG */
2046             REQUIRE_FEN;
2047             gen_helper_mulg(vc, cpu_env, va, vb);
2048             break;
2049         case 0x23:
2050             /* DIVG */
2051             REQUIRE_FEN;
2052             gen_helper_divg(vc, cpu_env, va, vb);
2053             break;
2054         case 0x25:
2055             /* CMPGEQ */
2056             REQUIRE_FEN;
2057             gen_helper_cmpgeq(vc, cpu_env, va, vb);
2058             break;
2059         case 0x26:
2060             /* CMPGLT */
2061             REQUIRE_FEN;
2062             gen_helper_cmpglt(vc, cpu_env, va, vb);
2063             break;
2064         case 0x27:
2065             /* CMPGLE */
2066             REQUIRE_FEN;
2067             gen_helper_cmpgle(vc, cpu_env, va, vb);
2068             break;
2069         case 0x2C:
2070             /* CVTGF */
2071             REQUIRE_REG_31(ra);
2072             REQUIRE_FEN;
2073             gen_helper_cvtgf(vc, cpu_env, vb);
2074             break;
2075         case 0x2D:
2076             /* CVTGD -- TODO */
2077             REQUIRE_REG_31(ra);
2078             goto invalid_opc;
2079         case 0x2F:
2080             /* CVTGQ */
2081             REQUIRE_REG_31(ra);
2082             REQUIRE_FEN;
2083             gen_helper_cvtgq(vc, cpu_env, vb);
2084             break;
2085         case 0x3C:
2086             /* CVTQF */
2087             REQUIRE_REG_31(ra);
2088             REQUIRE_FEN;
2089             gen_helper_cvtqf(vc, cpu_env, vb);
2090             break;
2091         case 0x3E:
2092             /* CVTQG */
2093             REQUIRE_REG_31(ra);
2094             REQUIRE_FEN;
2095             gen_helper_cvtqg(vc, cpu_env, vb);
2096             break;
2097         default:
2098             goto invalid_opc;
2099         }
2100         break;
2101 
2102     case 0x16:
2103         /* IEEE floating-point */
2104         switch (fpfn) { /* fn11 & 0x3F */
2105         case 0x00:
2106             /* ADDS */
2107             REQUIRE_FEN;
2108             gen_adds(ctx, ra, rb, rc, fn11);
2109             break;
2110         case 0x01:
2111             /* SUBS */
2112             REQUIRE_FEN;
2113             gen_subs(ctx, ra, rb, rc, fn11);
2114             break;
2115         case 0x02:
2116             /* MULS */
2117             REQUIRE_FEN;
2118             gen_muls(ctx, ra, rb, rc, fn11);
2119             break;
2120         case 0x03:
2121             /* DIVS */
2122             REQUIRE_FEN;
2123             gen_divs(ctx, ra, rb, rc, fn11);
2124             break;
2125         case 0x20:
2126             /* ADDT */
2127             REQUIRE_FEN;
2128             gen_addt(ctx, ra, rb, rc, fn11);
2129             break;
2130         case 0x21:
2131             /* SUBT */
2132             REQUIRE_FEN;
2133             gen_subt(ctx, ra, rb, rc, fn11);
2134             break;
2135         case 0x22:
2136             /* MULT */
2137             REQUIRE_FEN;
2138             gen_mult(ctx, ra, rb, rc, fn11);
2139             break;
2140         case 0x23:
2141             /* DIVT */
2142             REQUIRE_FEN;
2143             gen_divt(ctx, ra, rb, rc, fn11);
2144             break;
2145         case 0x24:
2146             /* CMPTUN */
2147             REQUIRE_FEN;
2148             gen_cmptun(ctx, ra, rb, rc, fn11);
2149             break;
2150         case 0x25:
2151             /* CMPTEQ */
2152             REQUIRE_FEN;
2153             gen_cmpteq(ctx, ra, rb, rc, fn11);
2154             break;
2155         case 0x26:
2156             /* CMPTLT */
2157             REQUIRE_FEN;
2158             gen_cmptlt(ctx, ra, rb, rc, fn11);
2159             break;
2160         case 0x27:
2161             /* CMPTLE */
2162             REQUIRE_FEN;
2163             gen_cmptle(ctx, ra, rb, rc, fn11);
2164             break;
2165         case 0x2C:
2166             REQUIRE_REG_31(ra);
2167             REQUIRE_FEN;
2168             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2169                 /* CVTST */
2170                 gen_cvtst(ctx, rb, rc, fn11);
2171             } else {
2172                 /* CVTTS */
2173                 gen_cvtts(ctx, rb, rc, fn11);
2174             }
2175             break;
2176         case 0x2F:
2177             /* CVTTQ */
2178             REQUIRE_REG_31(ra);
2179             REQUIRE_FEN;
2180             gen_cvttq(ctx, rb, rc, fn11);
2181             break;
2182         case 0x3C:
2183             /* CVTQS */
2184             REQUIRE_REG_31(ra);
2185             REQUIRE_FEN;
2186             gen_cvtqs(ctx, rb, rc, fn11);
2187             break;
2188         case 0x3E:
2189             /* CVTQT */
2190             REQUIRE_REG_31(ra);
2191             REQUIRE_FEN;
2192             gen_cvtqt(ctx, rb, rc, fn11);
2193             break;
2194         default:
2195             goto invalid_opc;
2196         }
2197         break;
2198 
2199     case 0x17:
2200         switch (fn11) {
2201         case 0x010:
2202             /* CVTLQ */
2203             REQUIRE_REG_31(ra);
2204             REQUIRE_FEN;
2205             vc = dest_fpr(ctx, rc);
2206             vb = load_fpr(ctx, rb);
2207             gen_cvtlq(vc, vb);
2208             break;
2209         case 0x020:
2210             /* CPYS */
2211             REQUIRE_FEN;
2212             if (rc == 31) {
2213                 /* Special case CPYS as FNOP.  */
2214             } else {
2215                 vc = dest_fpr(ctx, rc);
2216                 va = load_fpr(ctx, ra);
2217                 if (ra == rb) {
2218                     /* Special case CPYS as FMOV.  */
2219                     tcg_gen_mov_i64(vc, va);
2220                 } else {
2221                     vb = load_fpr(ctx, rb);
2222                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2223                 }
2224             }
2225             break;
2226         case 0x021:
2227             /* CPYSN */
2228             REQUIRE_FEN;
2229             vc = dest_fpr(ctx, rc);
2230             vb = load_fpr(ctx, rb);
2231             va = load_fpr(ctx, ra);
2232             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2233             break;
2234         case 0x022:
2235             /* CPYSE */
2236             REQUIRE_FEN;
2237             vc = dest_fpr(ctx, rc);
2238             vb = load_fpr(ctx, rb);
2239             va = load_fpr(ctx, ra);
2240             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2241             break;
2242         case 0x024:
2243             /* MT_FPCR */
2244             REQUIRE_FEN;
2245             va = load_fpr(ctx, ra);
2246             gen_helper_store_fpcr(cpu_env, va);
2247             if (ctx->tb_rm == QUAL_RM_D) {
2248                 /* Re-do the copy of the rounding mode to fp_status
2249                    the next time we use dynamic rounding.  */
2250                 ctx->tb_rm = -1;
2251             }
2252             break;
2253         case 0x025:
2254             /* MF_FPCR */
2255             REQUIRE_FEN;
2256             va = dest_fpr(ctx, ra);
2257             gen_helper_load_fpcr(va, cpu_env);
2258             break;
2259         case 0x02A:
2260             /* FCMOVEQ */
2261             REQUIRE_FEN;
2262             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2263             break;
2264         case 0x02B:
2265             /* FCMOVNE */
2266             REQUIRE_FEN;
2267             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2268             break;
2269         case 0x02C:
2270             /* FCMOVLT */
2271             REQUIRE_FEN;
2272             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2273             break;
2274         case 0x02D:
2275             /* FCMOVGE */
2276             REQUIRE_FEN;
2277             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2278             break;
2279         case 0x02E:
2280             /* FCMOVLE */
2281             REQUIRE_FEN;
2282             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2283             break;
2284         case 0x02F:
2285             /* FCMOVGT */
2286             REQUIRE_FEN;
2287             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2288             break;
2289         case 0x030: /* CVTQL */
2290         case 0x130: /* CVTQL/V */
2291         case 0x530: /* CVTQL/SV */
2292             REQUIRE_REG_31(ra);
2293             REQUIRE_FEN;
2294             vc = dest_fpr(ctx, rc);
2295             vb = load_fpr(ctx, rb);
2296             gen_helper_cvtql(vc, cpu_env, vb);
2297             gen_fp_exc_raise(rc, fn11);
2298             break;
2299         default:
2300             goto invalid_opc;
2301         }
2302         break;
2303 
2304     case 0x18:
2305         switch ((uint16_t)disp16) {
2306         case 0x0000:
2307             /* TRAPB */
2308             /* No-op.  */
2309             break;
2310         case 0x0400:
2311             /* EXCB */
2312             /* No-op.  */
2313             break;
2314         case 0x4000:
2315             /* MB */
2316             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2317             break;
2318         case 0x4400:
2319             /* WMB */
2320             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2321             break;
2322         case 0x8000:
2323             /* FETCH */
2324             /* No-op */
2325             break;
2326         case 0xA000:
2327             /* FETCH_M */
2328             /* No-op */
2329             break;
2330         case 0xC000:
2331             /* RPCC */
2332             va = dest_gpr(ctx, ra);
2333             if (translator_io_start(&ctx->base)) {
2334                 ret = DISAS_PC_STALE;
2335             }
2336             gen_helper_load_pcc(va, cpu_env);
2337             break;
2338         case 0xE000:
2339             /* RC */
2340             gen_rx(ctx, ra, 0);
2341             break;
2342         case 0xE800:
2343             /* ECB */
2344             break;
2345         case 0xF000:
2346             /* RS */
2347             gen_rx(ctx, ra, 1);
2348             break;
2349         case 0xF800:
2350             /* WH64 */
2351             /* No-op */
2352             break;
2353         case 0xFC00:
2354             /* WH64EN */
2355             /* No-op */
2356             break;
2357         default:
2358             goto invalid_opc;
2359         }
2360         break;
2361 
2362     case 0x19:
2363         /* HW_MFPR (PALcode) */
2364 #ifndef CONFIG_USER_ONLY
2365         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2366         va = dest_gpr(ctx, ra);
2367         ret = gen_mfpr(ctx, va, insn & 0xffff);
2368         break;
2369 #else
2370         goto invalid_opc;
2371 #endif
2372 
2373     case 0x1A:
2374         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2375            prediction stack action, which of course we don't implement.  */
2376         vb = load_gpr(ctx, rb);
2377         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2378         if (ra != 31) {
2379             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2380         }
2381         ret = DISAS_PC_UPDATED;
2382         break;
2383 
2384     case 0x1B:
2385         /* HW_LD (PALcode) */
2386 #ifndef CONFIG_USER_ONLY
2387         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2388         {
2389             TCGv addr = tcg_temp_new();
2390             vb = load_gpr(ctx, rb);
2391             va = dest_gpr(ctx, ra);
2392 
2393             tcg_gen_addi_i64(addr, vb, disp12);
2394             switch ((insn >> 12) & 0xF) {
2395             case 0x0:
2396                 /* Longword physical access (hw_ldl/p) */
2397                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2398                 break;
2399             case 0x1:
2400                 /* Quadword physical access (hw_ldq/p) */
2401                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2402                 break;
2403             case 0x2:
2404                 /* Longword physical access with lock (hw_ldl_l/p) */
2405                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2406                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2407                 tcg_gen_mov_i64(cpu_lock_value, va);
2408                 break;
2409             case 0x3:
2410                 /* Quadword physical access with lock (hw_ldq_l/p) */
2411                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2412                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2413                 tcg_gen_mov_i64(cpu_lock_value, va);
2414                 break;
2415             case 0x4:
2416                 /* Longword virtual PTE fetch (hw_ldl/v) */
2417                 goto invalid_opc;
2418             case 0x5:
2419                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2420                 goto invalid_opc;
2421                 break;
2422             case 0x6:
2423                 /* Invalid */
2424                 goto invalid_opc;
2425             case 0x7:
                /* Invalid */
2427                 goto invalid_opc;
2428             case 0x8:
2429                 /* Longword virtual access (hw_ldl) */
2430                 goto invalid_opc;
2431             case 0x9:
2432                 /* Quadword virtual access (hw_ldq) */
2433                 goto invalid_opc;
2434             case 0xA:
2435                 /* Longword virtual access with protection check (hw_ldl/w) */
2436                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2437                                     MO_LESL | MO_ALIGN);
2438                 break;
2439             case 0xB:
2440                 /* Quadword virtual access with protection check (hw_ldq/w) */
2441                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2442                                     MO_LEUQ | MO_ALIGN);
2443                 break;
2444             case 0xC:
2445                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2446                 goto invalid_opc;
2447             case 0xD:
2448                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2449                 goto invalid_opc;
2450             case 0xE:
2451                 /* Longword virtual access with alternate access mode and
2452                    protection checks (hw_ldl/wa) */
2453                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2454                                     MO_LESL | MO_ALIGN);
2455                 break;
2456             case 0xF:
2457                 /* Quadword virtual access with alternate access mode and
2458                    protection checks (hw_ldq/wa) */
2459                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2460                                     MO_LEUQ | MO_ALIGN);
2461                 break;
2462             }
2463             break;
2464         }
2465 #else
2466         goto invalid_opc;
2467 #endif
2468 
2469     case 0x1C:
2470         vc = dest_gpr(ctx, rc);
2471         if (fn7 == 0x70) {
2472             /* FTOIT */
2473             REQUIRE_AMASK(FIX);
2474             REQUIRE_REG_31(rb);
2475             va = load_fpr(ctx, ra);
2476             tcg_gen_mov_i64(vc, va);
2477             break;
2478         } else if (fn7 == 0x78) {
2479             /* FTOIS */
2480             REQUIRE_AMASK(FIX);
2481             REQUIRE_REG_31(rb);
2482             t32 = tcg_temp_new_i32();
2483             va = load_fpr(ctx, ra);
2484             gen_helper_s_to_memory(t32, va);
2485             tcg_gen_ext_i32_i64(vc, t32);
2486             break;
2487         }
2488 
2489         vb = load_gpr_lit(ctx, rb, lit, islit);
2490         switch (fn7) {
2491         case 0x00:
2492             /* SEXTB */
2493             REQUIRE_AMASK(BWX);
2494             REQUIRE_REG_31(ra);
2495             tcg_gen_ext8s_i64(vc, vb);
2496             break;
2497         case 0x01:
2498             /* SEXTW */
2499             REQUIRE_AMASK(BWX);
2500             REQUIRE_REG_31(ra);
2501             tcg_gen_ext16s_i64(vc, vb);
2502             break;
2503         case 0x30:
2504             /* CTPOP */
2505             REQUIRE_AMASK(CIX);
2506             REQUIRE_REG_31(ra);
2507             REQUIRE_NO_LIT;
2508             tcg_gen_ctpop_i64(vc, vb);
2509             break;
2510         case 0x31:
2511             /* PERR */
2512             REQUIRE_AMASK(MVI);
2513             REQUIRE_NO_LIT;
2514             va = load_gpr(ctx, ra);
2515             gen_helper_perr(vc, va, vb);
2516             break;
2517         case 0x32:
2518             /* CTLZ */
2519             REQUIRE_AMASK(CIX);
2520             REQUIRE_REG_31(ra);
2521             REQUIRE_NO_LIT;
2522             tcg_gen_clzi_i64(vc, vb, 64);
2523             break;
2524         case 0x33:
2525             /* CTTZ */
2526             REQUIRE_AMASK(CIX);
2527             REQUIRE_REG_31(ra);
2528             REQUIRE_NO_LIT;
2529             tcg_gen_ctzi_i64(vc, vb, 64);
2530             break;
2531         case 0x34:
2532             /* UNPKBW */
2533             REQUIRE_AMASK(MVI);
2534             REQUIRE_REG_31(ra);
2535             REQUIRE_NO_LIT;
2536             gen_helper_unpkbw(vc, vb);
2537             break;
2538         case 0x35:
2539             /* UNPKBL */
2540             REQUIRE_AMASK(MVI);
2541             REQUIRE_REG_31(ra);
2542             REQUIRE_NO_LIT;
2543             gen_helper_unpkbl(vc, vb);
2544             break;
2545         case 0x36:
2546             /* PKWB */
2547             REQUIRE_AMASK(MVI);
2548             REQUIRE_REG_31(ra);
2549             REQUIRE_NO_LIT;
2550             gen_helper_pkwb(vc, vb);
2551             break;
2552         case 0x37:
2553             /* PKLB */
2554             REQUIRE_AMASK(MVI);
2555             REQUIRE_REG_31(ra);
2556             REQUIRE_NO_LIT;
2557             gen_helper_pklb(vc, vb);
2558             break;
2559         case 0x38:
2560             /* MINSB8 */
2561             REQUIRE_AMASK(MVI);
2562             va = load_gpr(ctx, ra);
2563             gen_helper_minsb8(vc, va, vb);
2564             break;
2565         case 0x39:
2566             /* MINSW4 */
2567             REQUIRE_AMASK(MVI);
2568             va = load_gpr(ctx, ra);
2569             gen_helper_minsw4(vc, va, vb);
2570             break;
2571         case 0x3A:
2572             /* MINUB8 */
2573             REQUIRE_AMASK(MVI);
2574             va = load_gpr(ctx, ra);
2575             gen_helper_minub8(vc, va, vb);
2576             break;
2577         case 0x3B:
2578             /* MINUW4 */
2579             REQUIRE_AMASK(MVI);
2580             va = load_gpr(ctx, ra);
2581             gen_helper_minuw4(vc, va, vb);
2582             break;
2583         case 0x3C:
2584             /* MAXUB8 */
2585             REQUIRE_AMASK(MVI);
2586             va = load_gpr(ctx, ra);
2587             gen_helper_maxub8(vc, va, vb);
2588             break;
2589         case 0x3D:
2590             /* MAXUW4 */
2591             REQUIRE_AMASK(MVI);
2592             va = load_gpr(ctx, ra);
2593             gen_helper_maxuw4(vc, va, vb);
2594             break;
2595         case 0x3E:
2596             /* MAXSB8 */
2597             REQUIRE_AMASK(MVI);
2598             va = load_gpr(ctx, ra);
2599             gen_helper_maxsb8(vc, va, vb);
2600             break;
2601         case 0x3F:
2602             /* MAXSW4 */
2603             REQUIRE_AMASK(MVI);
2604             va = load_gpr(ctx, ra);
2605             gen_helper_maxsw4(vc, va, vb);
2606             break;
2607         default:
2608             goto invalid_opc;
2609         }
2610         break;
2611 
2612     case 0x1D:
2613         /* HW_MTPR (PALcode) */
2614 #ifndef CONFIG_USER_ONLY
2615         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2616         vb = load_gpr(ctx, rb);
2617         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2618         break;
2619 #else
2620         goto invalid_opc;
2621 #endif
2622 
2623     case 0x1E:
2624         /* HW_RET (PALcode) */
2625 #ifndef CONFIG_USER_ONLY
2626         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2627         if (rb == 31) {
2628             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2629                address from EXC_ADDR.  This turns out to be useful for our
2630                emulation PALcode, so continue to accept it.  */
2631             vb = dest_sink(ctx);
2632             tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2633         } else {
2634             vb = load_gpr(ctx, rb);
2635         }
2636         tcg_gen_movi_i64(cpu_lock_addr, -1);
2637         st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2638         tmp = tcg_temp_new();
2639         tcg_gen_andi_i64(tmp, vb, 1);
2640         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2641         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2642         /* Allow interrupts to be recognized right away.  */
2643         ret = DISAS_PC_UPDATED_NOCHAIN;
2644         break;
2645 #else
2646         goto invalid_opc;
2647 #endif
2648 
2649     case 0x1F:
2650         /* HW_ST (PALcode) */
2651 #ifndef CONFIG_USER_ONLY
2652         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2653         {
2654             switch ((insn >> 12) & 0xF) {
2655             case 0x0:
2656                 /* Longword physical access */
2657                 va = load_gpr(ctx, ra);
2658                 vb = load_gpr(ctx, rb);
2659                 tmp = tcg_temp_new();
2660                 tcg_gen_addi_i64(tmp, vb, disp12);
2661                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2662                 break;
2663             case 0x1:
2664                 /* Quadword physical access */
2665                 va = load_gpr(ctx, ra);
2666                 vb = load_gpr(ctx, rb);
2667                 tmp = tcg_temp_new();
2668                 tcg_gen_addi_i64(tmp, vb, disp12);
2669                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2670                 break;
2671             case 0x2:
2672                 /* Longword physical access with lock */
2673                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2674                                             MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2675                 break;
2676             case 0x3:
2677                 /* Quadword physical access with lock */
2678                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2679                                             MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2680                 break;
2681             case 0x4:
2682                 /* Longword virtual access */
2683                 goto invalid_opc;
2684             case 0x5:
2685                 /* Quadword virtual access */
2686                 goto invalid_opc;
2687             case 0x6:
2688                 /* Invalid */
2689                 goto invalid_opc;
2690             case 0x7:
2691                 /* Invalid */
2692                 goto invalid_opc;
2693             case 0x8:
2694                 /* Invalid */
2695                 goto invalid_opc;
2696             case 0x9:
2697                 /* Invalid */
2698                 goto invalid_opc;
2699             case 0xA:
2700                 /* Invalid */
2701                 goto invalid_opc;
2702             case 0xB:
2703                 /* Invalid */
2704                 goto invalid_opc;
2705             case 0xC:
2706                 /* Longword virtual access with alternate access mode */
2707                 goto invalid_opc;
2708             case 0xD:
2709                 /* Quadword virtual access with alternate access mode */
2710                 goto invalid_opc;
2711             case 0xE:
2712                 /* Invalid */
2713                 goto invalid_opc;
2714             case 0xF:
2715                 /* Invalid */
2716                 goto invalid_opc;
2717             }
2718             break;
2719         }
2720 #else
2721         goto invalid_opc;
2722 #endif
2723     case 0x20:
2724         /* LDF */
2725         REQUIRE_FEN;
2726         gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
2727         break;
2728     case 0x21:
2729         /* LDG */
2730         REQUIRE_FEN;
2731         gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
2732         break;
2733     case 0x22:
2734         /* LDS */
2735         REQUIRE_FEN;
2736         gen_load_fp(ctx, ra, rb, disp16, gen_lds);
2737         break;
2738     case 0x23:
2739         /* LDT */
2740         REQUIRE_FEN;
2741         gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
2742         break;
2743     case 0x24:
2744         /* STF */
2745         REQUIRE_FEN;
2746         gen_store_fp(ctx, ra, rb, disp16, gen_stf);
2747         break;
2748     case 0x25:
2749         /* STG */
2750         REQUIRE_FEN;
2751         gen_store_fp(ctx, ra, rb, disp16, gen_stg);
2752         break;
2753     case 0x26:
2754         /* STS */
2755         REQUIRE_FEN;
2756         gen_store_fp(ctx, ra, rb, disp16, gen_sts);
2757         break;
2758     case 0x27:
2759         /* STT */
2760         REQUIRE_FEN;
2761         gen_store_fp(ctx, ra, rb, disp16, gen_stt);
2762         break;
2763     case 0x28:
2764         /* LDL */
2765         gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
2766         break;
2767     case 0x29:
2768         /* LDQ */
2769         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
2770         break;
2771     case 0x2A:
2772         /* LDL_L */
2773         gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
2774         break;
2775     case 0x2B:
2776         /* LDQ_L */
2777         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
2778         break;
2779     case 0x2C:
2780         /* STL */
2781         gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
2782         break;
2783     case 0x2D:
2784         /* STQ */
2785         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
2786         break;
2787     case 0x2E:
2788         /* STL_C */
2789         ret = gen_store_conditional(ctx, ra, rb, disp16,
2790                                     ctx->mem_idx, MO_LESL | MO_ALIGN);
2791         break;
2792     case 0x2F:
2793         /* STQ_C */
2794         ret = gen_store_conditional(ctx, ra, rb, disp16,
2795                                     ctx->mem_idx, MO_LEUQ | MO_ALIGN);
2796         break;
2797     case 0x30:
2798         /* BR */
2799         ret = gen_bdirect(ctx, ra, disp21);
2800         break;
2801     case 0x31: /* FBEQ */
2802         REQUIRE_FEN;
2803         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2804         break;
2805     case 0x32: /* FBLT */
2806         REQUIRE_FEN;
2807         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2808         break;
2809     case 0x33: /* FBLE */
2810         REQUIRE_FEN;
2811         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2812         break;
2813     case 0x34:
2814         /* BSR */
2815         ret = gen_bdirect(ctx, ra, disp21);
2816         break;
2817     case 0x35: /* FBNE */
2818         REQUIRE_FEN;
2819         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2820         break;
2821     case 0x36: /* FBGE */
2822         REQUIRE_FEN;
2823         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2824         break;
2825     case 0x37: /* FBGT */
2826         REQUIRE_FEN;
2827         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2828         break;
2829     case 0x38:
2830         /* BLBC */
2831         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2832         break;
2833     case 0x39:
2834         /* BEQ */
2835         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2836         break;
2837     case 0x3A:
2838         /* BLT */
2839         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2840         break;
2841     case 0x3B:
2842         /* BLE */
2843         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2844         break;
2845     case 0x3C:
2846         /* BLBS */
2847         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2848         break;
2849     case 0x3D:
2850         /* BNE */
2851         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2852         break;
2853     case 0x3E:
2854         /* BGE */
2855         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2856         break;
2857     case 0x3F:
2858         /* BGT */
2859         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2860         break;
2861     invalid_opc:
2862         ret = gen_invalid(ctx);
2863         break;
2864     raise_fen:
2865         ret = gen_excp(ctx, EXCP_FEN, 0);
2866         break;
2867     }
2868 
2869     return ret;
2870 }
2871 
2872 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2873 {
2874     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2875     CPUAlphaState *env = cpu->env_ptr;
2876     int64_t bound;
2877 
2878     ctx->tbflags = ctx->base.tb->flags;
2879     ctx->mem_idx = cpu_mmu_index(env, false);
2880     ctx->implver = env->implver;
2881     ctx->amask = env->amask;
2882 
2883 #ifdef CONFIG_USER_ONLY
2884     ctx->ir = cpu_std_ir;
2885     ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
2886 #else
2887     ctx->palbr = env->palbr;
2888     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2889 #endif
2890 
2891     /* ??? Every TB begins with unset rounding mode, to be initialized on
2892        the first fp insn of the TB.  Alternately we could define a proper
2893        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2894        to reset the FP_STATUS to that default at the end of any TB that
2895        changes the default.  We could even (gasp) dynamically figure out
2896        what default would be most efficient given the running program.  */
2897     ctx->tb_rm = -1;
2898     /* Similarly for flush-to-zero.  */
2899     ctx->tb_ftz = -1;
2900 
2901     ctx->zero = NULL;
2902     ctx->sink = NULL;
2903 
2904     /* Bound the number of insns to execute to those left on the page.  */
2905     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2906     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2907 }
2908 
/* Per-TB start hook: intentionally empty — all per-TB state is already
   established in alpha_tr_init_disas_context above.  */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2912 
/* Record the guest PC of the insn about to be translated, so the
   generated code can be mapped back to it (e.g. for exception restart).  */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2917 
2918 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2919 {
2920     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2921     CPUAlphaState *env = cpu->env_ptr;
2922     uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
2923 
2924     ctx->base.pc_next += 4;
2925     ctx->base.is_jmp = translate_one(ctx, insn);
2926 
2927     free_context_temps(ctx);
2928 }
2929 
/* Emit the TB epilogue appropriate for the reason translation stopped.
   Note the deliberate fallthrough cascade: TOO_MANY falls into PC_STALE
   (store the known next PC) which falls into PC_UPDATED (indirect jump).  */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The TB has already been ended (e.g. by an exception helper).  */
        break;
    case DISAS_TOO_MANY:
        /* Stopped at the insn/page bound; pc_next is the fallthrough
           address, so chain directly to it when goto_tb is permitted.  */
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        /* cpu_pc does not yet hold pc_next; materialize it first.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* cpu_pc is current; chain via the dynamic TB lookup helper.  */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        /* cpu_pc is current but chaining is not allowed; return to the
           main loop instead.  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
2957 
/* Write the guest disassembly of a completed TB to the log file,
   prefixed by the nearest symbol for the TB's start address.  */
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
2964 
/* Hook table connecting the Alpha front end to the generic translator loop.  */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
2973 
/* Target entry point for TB translation: run the generic translator
   loop over the Alpha hook table with a stack-allocated context.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}
2980