xref: /openbmc/qemu/target/alpha/translate.c (revision 1793ad02)
1 /*
2  *  Alpha emulation cpu translation for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "sysemu/cpu-timers.h"
24 #include "disas/disas.h"
25 #include "qemu/host-utils.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "exec/translator.h"
32 #include "exec/log.h"
33 
34 
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37 
38 #ifdef ALPHA_DEBUG_DISAS
39 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 #  define LOG_DISAS(...) do { } while (0)
42 #endif
43 
typedef struct DisasContext DisasContext;
/* Per-translation-block state threaded through the Alpha decoder.  */
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    /* PALcode base register, used for PAL entry dispatch.  */
    uint64_t palbr;
#endif
    /* Copy of the TB flags this translation was started with.  */
    uint32_t tbflags;
    /* MMU index used for memory accesses in this context.  */
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};
70 
71 /* Target-specific return values from translate_one, indicating the
72    state of the TB.  Note that DISAS_NEXT indicates that we are not
73    exiting the TB.  */
74 #define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
75 #define DISAS_PC_UPDATED          DISAS_TARGET_1
76 #define DISAS_PC_STALE            DISAS_TARGET_2
77 
78 /* global register indexes */
79 static TCGv cpu_std_ir[31];
80 static TCGv cpu_fir[31];
81 static TCGv cpu_pc;
82 static TCGv cpu_lock_addr;
83 static TCGv cpu_lock_value;
84 
85 #ifndef CONFIG_USER_ONLY
86 static TCGv cpu_pal_ir[31];
87 #endif
88 
89 #include "exec/gen-icount.h"
90 
/*
 * One-time initialization of the TCG globals that mirror the Alpha
 * CPU state: the 31 integer registers, the 31 FP registers, the PAL
 * shadow registers (system mode only), and pc/lock_addr/lock_value.
 */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    /* The PAL register file is the standard one with eight entries
       (t7, s0-s5, t11) replaced by their shadow counterparts.  */
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
154 
155 static TCGv load_zero(DisasContext *ctx)
156 {
157     if (!ctx->zero) {
158         ctx->zero = tcg_constant_i64(0);
159     }
160     return ctx->zero;
161 }
162 
163 static TCGv dest_sink(DisasContext *ctx)
164 {
165     if (!ctx->sink) {
166         ctx->sink = tcg_temp_new();
167     }
168     return ctx->sink;
169 }
170 
171 static void free_context_temps(DisasContext *ctx)
172 {
173     if (ctx->sink) {
174         tcg_gen_discard_i64(ctx->sink);
175         tcg_temp_free(ctx->sink);
176         ctx->sink = NULL;
177     }
178 }
179 
180 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
181 {
182     if (likely(reg < 31)) {
183         return ctx->ir[reg];
184     } else {
185         return load_zero(ctx);
186     }
187 }
188 
189 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
190                          uint8_t lit, bool islit)
191 {
192     if (islit) {
193         return tcg_constant_i64(lit);
194     } else if (likely(reg < 31)) {
195         return ctx->ir[reg];
196     } else {
197         return load_zero(ctx);
198     }
199 }
200 
201 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
202 {
203     if (likely(reg < 31)) {
204         return ctx->ir[reg];
205     } else {
206         return dest_sink(ctx);
207     }
208 }
209 
210 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
211 {
212     if (likely(reg < 31)) {
213         return cpu_fir[reg];
214     } else {
215         return load_zero(ctx);
216     }
217 }
218 
219 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
220 {
221     if (likely(reg < 31)) {
222         return cpu_fir[reg];
223     } else {
224         return dest_sink(ctx);
225     }
226 }
227 
228 static int get_flag_ofs(unsigned shift)
229 {
230     int ofs = offsetof(CPUAlphaState, flags);
231 #ifdef HOST_WORDS_BIGENDIAN
232     ofs += 3 - (shift / 8);
233 #else
234     ofs += shift / 8;
235 #endif
236     return ofs;
237 }
238 
/* Load the flag byte at bit position SHIFT of env->flags into VAL.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}
243 
/* Store VAL into the flag byte at bit position SHIFT of env->flags.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
248 
249 static void gen_excp_1(int exception, int error_code)
250 {
251     TCGv_i32 tmp1, tmp2;
252 
253     tmp1 = tcg_constant_i32(exception);
254     tmp2 = tcg_constant_i32(error_code);
255     gen_helper_excp(cpu_env, tmp1, tmp2);
256 }
257 
/* Synchronize cpu_pc with the translator and raise EXCEPTION.
   Always ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
264 
/* Raise the illegal-opcode (OPCDEC) exception.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
269 
/* LDF: load a 32-bit VAX F-float from address T1 into T0, expanding
   the memory format to the 64-bit register format via helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
277 
/* LDG: load a 64-bit VAX G-float from address T1 into T0, converting
   the memory word ordering to the register format via helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
285 
/* LDS: load a 32-bit IEEE single from address T1 into T0, expanding
   to the 64-bit register format via helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
293 
/* LDL_L: load-locked 32-bit.  Record the address and loaded value so
   a later STL_C can verify the reservation via cmpxchg.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
300 
/* LDQ_L: load-locked 64-bit.  Record the address and loaded value so
   a later STQ_C can verify the reservation via cmpxchg.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
307 
/*
 * Common expansion for memory-format loads.  Computes the effective
 * address RB + DISP16 (optionally cleared to an 8-byte boundary for
 * the *_U forms) and loads directly into the RA register via the
 * supplied low-level load generator.
 */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        /* Clear the low three bits for the unaligned-access forms.  */
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    /* Load straight into the destination register; ra != 31 here,
       so no sink is needed.  */
    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
340 
/* STF: convert T0 from register format to 32-bit VAX F-float memory
   format and store it at address T1.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
348 
/* STG: convert T0 from register format to 64-bit VAX G-float memory
   format and store it at address T1.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}
356 
/* STS: convert T0 from register format to 32-bit IEEE single memory
   format and store it at address T1.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
364 
/*
 * Common expansion for memory-format stores.  Computes the effective
 * address RB + DISP16 (optionally cleared to an 8-byte boundary for
 * the *_U forms) and stores RA via the supplied low-level store
 * generator.  Unlike loads, RA == 31 still stores (a zero).
 */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        /* Clear the low three bits for the unaligned-access forms.  */
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
390 
/*
 * STL_C / STQ_C: store-conditional.  Succeeds only if the effective
 * address matches the reservation made by the prior load-locked and
 * the memory still contains the value loaded then (checked with an
 * atomic cmpxchg against cpu_lock_value).  RA receives 1 on success,
 * 0 on failure; the reservation is always invalidated afterward.
 */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    /* Temps must be dead before a branch splits the basic block.  */
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    /* val receives the old memory contents; equality with
       cpu_lock_value means the store was performed.  */
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* -1 is never a valid lock address: the reservation is cleared.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
427 
/* True if a direct (chained) goto_tb to DEST is permitted here.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
432 
433 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
434 {
435     uint64_t dest = ctx->base.pc_next + (disp << 2);
436 
437     if (ra != 31) {
438         tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
439     }
440 
441     /* Notice branch-to-next; used to initialize RA with the PC.  */
442     if (disp == 0) {
443         return 0;
444     } else if (use_goto_tb(ctx, dest)) {
445         tcg_gen_goto_tb(0);
446         tcg_gen_movi_i64(cpu_pc, dest);
447         tcg_gen_exit_tb(ctx->base.tb, 0);
448         return DISAS_NORETURN;
449     } else {
450         tcg_gen_movi_i64(cpu_pc, dest);
451         return DISAS_PC_UPDATED;
452     }
453 }
454 
/*
 * Common conditional-branch tail: branch to pc_next + DISP * 4 when
 * CMP compares against zero with COND, otherwise fall through.
 * Uses a two-sided goto_tb when chaining is allowed, else a movcond
 * selecting the new PC.
 */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    /* NOTE(review): disp << 2 on a negative int32_t is formally UB;
       presumably relied upon to behave arithmetically — consider
       (int64_t)disp * 4.  */
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Not-taken path: chain to the fall-through TB (slot 0).  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken path: chain to the branch target TB (slot 1).  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        /* cpu_pc = (cmp COND 0) ? dest : pc_next.  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
483 
/* Integer conditional branch.  When MASK is set (BLBC/BLBS), only the
   low bit of RA participates in the comparison.  */
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
498 
499 /* Fold -0.0 for comparison with COND.  */
500 
/* Transform SRC into DEST such that an integer comparison of DEST
   against zero with COND gives the correct FP result, treating the
   IEEE encoding of -0.0 (sign bit only) as equal to +0.0.  */
static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.
           dest = (src != mzero) ? src : 0, using neg to build an
           all-ones / all-zeros mask from the setcond result.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        /* Only the six conditions above are ever generated.  */
        abort();
    }
}
530 
/* FP conditional branch: fold -0.0 per COND, then branch on the
   integer comparison of the folded value against zero.  */
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
542 
/* FCMOVxx: RC = (RA COND 0.0) ? RB : RC, with -0.0 folded so the
   integer movcond gives the IEEE-correct answer.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
556 
557 #define QUAL_RM_N       0x080   /* Round mode nearest even */
558 #define QUAL_RM_C       0x000   /* Round mode chopped */
559 #define QUAL_RM_M       0x040   /* Round mode minus infinity */
560 #define QUAL_RM_D       0x0c0   /* Round mode dynamic */
561 #define QUAL_RM_MASK    0x0c0
562 
563 #define QUAL_U          0x100   /* Underflow enable (fp output) */
564 #define QUAL_V          0x100   /* Overflow enable (int output) */
565 #define QUAL_S          0x400   /* Software completion enable */
566 #define QUAL_I          0x200   /* Inexact detection enable */
567 
/* Emit code to set fp_status.float_rounding_mode according to the
   rounding-mode qualifier bits of FN11.  Redundant updates within a
   TB are elided via ctx->tb_rm.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    /* fn11 is masked to QUAL_RM_MASK, so exactly these four cases.  */
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
607 
/* Emit code to set fp_status.flush_to_zero according to the
   underflow-enable qualifier of FN11.  Redundant updates within a
   TB are elided via ctx->tb_ftz.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
637 
/* Return the value of FP register REG as an IEEE input operand,
   emitting the input-checking helper appropriate to the /S qualifier
   and to whether the consumer is a comparison (IS_CMP).  */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            /* Without software completion, trap on non-finite inputs.  */
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
663 
/* Emit the post-operation FP exception check for destination RC,
   masking off the exception bits that the qualifiers in FN11 leave
   disabled.  */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
695 
/* CVTLQ: convert the longword held in FP-register format (bits
   spread across <63:62> and <58:29>) in VB into a sign-extended
   quadword in VC.  */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
708 
/* Common expansion for two-operand IEEE arithmetic (sqrt, cvt):
   apply rounding-mode and flush-to-zero qualifiers, call HELPER on
   the checked input, then raise any enabled FP exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
723 
/* Expand gen_<name>() wrappers that forward to gen_ieee_arith2()
   with the matching gen_helper_<name> helper.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
734 
/* CVTTQ: convert T-float to quadword integer, with a fast path for
   the (nearly universal) cropped rounding mode.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
753 
754 static void gen_ieee_intcvt(DisasContext *ctx,
755                             void (*helper)(TCGv, TCGv_ptr, TCGv),
756                             int rb, int rc, int fn11)
757 {
758     TCGv vb, vc;
759 
760     gen_qual_roundmode(ctx, fn11);
761     vb = load_fpr(ctx, rb);
762     vc = dest_fpr(ctx, rc);
763 
764     /* The only exception that can be raised by integer conversion
765        is inexact.  Thus we only need to worry about exceptions when
766        inexact handling is requested.  */
767     if (fn11 & QUAL_I) {
768         helper(vc, cpu_env, vb);
769         gen_fp_exc_raise(rc, fn11);
770     } else {
771         helper(vc, cpu_env, vb);
772     }
773 }
774 
/* Expand gen_<name>() wrappers that forward to gen_ieee_intcvt()
   with the matching gen_helper_<name> helper.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
783 
/* CPYS family: vc = (va & mask) | (vb & ~mask), with VA optionally
   complemented first (CPYSN).  MASK selects which bits come from VA.  */
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
800 
/* Common expansion for three-operand IEEE arithmetic (add/sub/mul/div):
   apply rounding-mode and flush-to-zero qualifiers, call HELPER on the
   checked inputs, then raise any enabled FP exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
817 
/* Expand gen_<name>() wrappers that forward to gen_ieee_arith3()
   with the matching gen_helper_<name> helper.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
832 
/* Common expansion for IEEE comparisons.  No rounding or flush-to-zero
   qualifiers apply; inputs are checked with is_cmp = 1.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
846 
/* Expand gen_<name>() wrappers that forward to gen_ieee_compare()
   with the matching gen_helper_<name> helper.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
857 
/* Expand the 8-bit byte-select literal LIT into a 64-bit mask:
   byte i of the result is 0xff when bit i of LIT is set, else 0x00
   (the ZAPNOT byte-mask semantics).  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;

    for (int i = 0; i < 8; ++i) {
        if (lit & (1u << i)) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
870 
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    /* Special-case the masks that map to single TCG extension ops.  */
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
897 
/* EXTWH, EXTLH, EXTQH: extract the high part of an unaligned datum.
   VC = (VA << (64 - 8 * (RB & 7))) zap-not'ed with BYTE_MASK.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count is (64 - 8*(RB&7)) mod 64 == (-8*RB) & 63.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
920 
/* EXTBL, EXTWL, EXTLL, EXTQL: extract the low part of an unaligned
   datum.  VC = (VA >> (8 * (RB & 7))) zap-not'ed with BYTE_MASK.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        /* The literal case folds shift + zap into one extract.  */
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
941 
/* INSWH, INSLH, INSQH: insert the high part of a datum, producing the
   bytes of VA that would spill into the next aligned quadword.  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* A shift amount of 0 yields no high spill-over bytes.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
979 
/* INSBL, INSWL, INSLL, INSQL: insert the low part of a datum,
   shifting the zapped bytes of VA up to the byte position given by
   RB (or the literal).  */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        /* The literal case folds zap + shift into one deposit.  */
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
1008 
1009 /* MSKWH, MSKLH, MSKQH */
1010 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1011                       uint8_t lit, uint8_t byte_mask)
1012 {
1013     if (islit) {
1014         gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
1015     } else {
1016         TCGv shift = tcg_temp_new();
1017         TCGv mask = tcg_temp_new();
1018 
1019         /* The instruction description is as above, where the byte_mask
1020            is shifted left, and then we extract bits <15:8>.  This can be
1021            emulated with a right-shift on the expanded byte mask.  This
1022            requires extra care because for an input <2:0> == 0 we need a
1023            shift of 64 bits in order to generate a zero.  This is done by
1024            splitting the shift into two parts, the variable shift - 1
1025            followed by a constant 1 shift.  The code we expand below is
1026            equivalent to ~(B * 8) & 63.  */
1027 
1028         tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1029         tcg_gen_not_i64(shift, shift);
1030         tcg_gen_andi_i64(shift, shift, 0x3f);
1031         tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1032         tcg_gen_shr_i64(mask, mask, shift);
1033         tcg_gen_shri_i64(mask, mask, 1);
1034 
1035         tcg_gen_andc_i64(vc, va, mask);
1036 
1037         tcg_temp_free(mask);
1038         tcg_temp_free(shift);
1039     }
1040 }
1041 
1042 /* MSKBL, MSKWL, MSKLL, MSKQL */
1043 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1044                       uint8_t lit, uint8_t byte_mask)
1045 {
1046     if (islit) {
1047         gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1048     } else {
1049         TCGv shift = tcg_temp_new();
1050         TCGv mask = tcg_temp_new();
1051 
1052         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1053         tcg_gen_shli_i64(shift, shift, 3);
1054         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1055         tcg_gen_shl_i64(mask, mask, shift);
1056 
1057         tcg_gen_andc_i64(vc, va, mask);
1058 
1059         tcg_temp_free(mask);
1060         tcg_temp_free(shift);
1061     }
1062 }
1063 
/*
 * Read the RX flag into register RA (unless RA is the zero register)
 * and then store SET as its new value.  The load must precede the
 * store so that RA observes the pre-update flag.
 */
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        /* Return the previous flag value in Ra.  */
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    /* Unconditionally write the new flag value.  */
    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
1072 
/*
 * Translate a CALL_PAL instruction with entry number PALCODE.
 * Trivial unprivileged (0x80..0xBF) and privileged (0x00..0x3F)
 * entries that only touch internal CPU state are expanded inline;
 * everything else dispatches to real PALcode at do_call_pal.
 * Returns the DisasJumpType describing how the TB ends.
 */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index))
;
            break;

        case 0x3E:
            /* WTINT */
            /* Halt the CPU until the next interrupt, returning 0 in v0.  */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    /* Privileged entry attempted from user mode, or an out-of-range
       entry number: invalid opcode.  */
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* Bit 0 of exc_addr records whether we were already in PAL
           mode; otherwise, enter PAL mode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        /* Unprivileged entries vector at palbr + 0x2000, privileged
           ones at palbr + 0x1000; each slot is 64 bytes.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
1226 
1227 #ifndef CONFIG_USER_ONLY
1228 
/* Flag OR'd into cpu_pr_data() results: the register is backed by a
   32-bit field rather than a full 64-bit one.  */
#define PR_LONG         0x200000

/*
 * Map processor register number PR to the offset of its backing field
 * within CPUAlphaState, tagged with PR_LONG for 32-bit fields.
 * Returns 0 for registers with no simple memory backing; callers
 * treat those as read-zero, write-ignore (or handle them specially).
 */
static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    /* Scratch registers 40..63 map to the scratch[] array.  */
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
1254 
/*
 * Move-from-processor-register: read internal register REGNO into VA.
 * Returns DISAS_PC_STALE when the read was a timer access under
 * icount (gen_io_start requires the TB to end), otherwise DISAS_NEXT.
 */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
        /* fallthrough */
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* Timer reads count as I/O under icount; end the TB so the
               access is properly accounted.  */
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            /* 32-bit backing field: sign-extend on load.  */
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
1305 
/*
 * Move-to-processor-register: write VB to internal register REGNO.
 * Returns DISAS_NEXT normally; DISAS_PC_STALE when the write has side
 * effects that invalidate translation state (HALT, PALBR, or an ALARM
 * write under icount); ends the TB via gen_excp for WAIT.
 */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Halt the CPU and raise EXCP_HALTED to stop execution.  */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            /* Timer writes count as I/O under icount; the TB must end.  */
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                /* 32-bit backing field: store the low 32 bits.  */
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
1380 #endif /* !USER_ONLY*/
1381 
/* Reject the instruction if its original encoding used the literal
   operand form (bit 12 set), even when rb == 31 later folded the
   operand to a literal zero.  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless the CPU's amask advertises the
   AMASK_FLAG architecture extension.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless FLAG is set in the TB flags.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless register field WHICH is 31
   (the zero register).  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Branch to the raise_fen handler when floating point is disabled
   (ENV_FLAG_FEN clear in the TB flags).  */
#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
1416 
1417 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1418 {
1419     int32_t disp21, disp16, disp12 __attribute__((unused));
1420     uint16_t fn11;
1421     uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1422     bool islit, real_islit;
1423     TCGv va, vb, vc, tmp, tmp2;
1424     TCGv_i32 t32;
1425     DisasJumpType ret;
1426 
1427     /* Decode all instruction fields */
1428     opc = extract32(insn, 26, 6);
1429     ra = extract32(insn, 21, 5);
1430     rb = extract32(insn, 16, 5);
1431     rc = extract32(insn, 0, 5);
1432     real_islit = islit = extract32(insn, 12, 1);
1433     lit = extract32(insn, 13, 8);
1434 
1435     disp21 = sextract32(insn, 0, 21);
1436     disp16 = sextract32(insn, 0, 16);
1437     disp12 = sextract32(insn, 0, 12);
1438 
1439     fn11 = extract32(insn, 5, 11);
1440     fpfn = extract32(insn, 5, 6);
1441     fn7 = extract32(insn, 5, 7);
1442 
1443     if (rb == 31 && !islit) {
1444         islit = true;
1445         lit = 0;
1446     }
1447 
1448     ret = DISAS_NEXT;
1449     switch (opc) {
1450     case 0x00:
1451         /* CALL_PAL */
1452         ret = gen_call_pal(ctx, insn & 0x03ffffff);
1453         break;
1454     case 0x01:
1455         /* OPC01 */
1456         goto invalid_opc;
1457     case 0x02:
1458         /* OPC02 */
1459         goto invalid_opc;
1460     case 0x03:
1461         /* OPC03 */
1462         goto invalid_opc;
1463     case 0x04:
1464         /* OPC04 */
1465         goto invalid_opc;
1466     case 0x05:
1467         /* OPC05 */
1468         goto invalid_opc;
1469     case 0x06:
1470         /* OPC06 */
1471         goto invalid_opc;
1472     case 0x07:
1473         /* OPC07 */
1474         goto invalid_opc;
1475 
1476     case 0x09:
1477         /* LDAH */
1478         disp16 = (uint32_t)disp16 << 16;
1479         /* fall through */
1480     case 0x08:
1481         /* LDA */
1482         va = dest_gpr(ctx, ra);
1483         /* It's worth special-casing immediate loads.  */
1484         if (rb == 31) {
1485             tcg_gen_movi_i64(va, disp16);
1486         } else {
1487             tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1488         }
1489         break;
1490 
1491     case 0x0A:
1492         /* LDBU */
1493         REQUIRE_AMASK(BWX);
1494         gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1495         break;
1496     case 0x0B:
1497         /* LDQ_U */
1498         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1499         break;
1500     case 0x0C:
1501         /* LDWU */
1502         REQUIRE_AMASK(BWX);
1503         gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1504         break;
1505     case 0x0D:
1506         /* STW */
1507         REQUIRE_AMASK(BWX);
1508         gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1509         break;
1510     case 0x0E:
1511         /* STB */
1512         REQUIRE_AMASK(BWX);
1513         gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1514         break;
1515     case 0x0F:
1516         /* STQ_U */
1517         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1518         break;
1519 
1520     case 0x10:
1521         vc = dest_gpr(ctx, rc);
1522         vb = load_gpr_lit(ctx, rb, lit, islit);
1523 
1524         if (ra == 31) {
1525             if (fn7 == 0x00) {
1526                 /* Special case ADDL as SEXTL.  */
1527                 tcg_gen_ext32s_i64(vc, vb);
1528                 break;
1529             }
1530             if (fn7 == 0x29) {
1531                 /* Special case SUBQ as NEGQ.  */
1532                 tcg_gen_neg_i64(vc, vb);
1533                 break;
1534             }
1535         }
1536 
1537         va = load_gpr(ctx, ra);
1538         switch (fn7) {
1539         case 0x00:
1540             /* ADDL */
1541             tcg_gen_add_i64(vc, va, vb);
1542             tcg_gen_ext32s_i64(vc, vc);
1543             break;
1544         case 0x02:
1545             /* S4ADDL */
1546             tmp = tcg_temp_new();
1547             tcg_gen_shli_i64(tmp, va, 2);
1548             tcg_gen_add_i64(tmp, tmp, vb);
1549             tcg_gen_ext32s_i64(vc, tmp);
1550             tcg_temp_free(tmp);
1551             break;
1552         case 0x09:
1553             /* SUBL */
1554             tcg_gen_sub_i64(vc, va, vb);
1555             tcg_gen_ext32s_i64(vc, vc);
1556             break;
1557         case 0x0B:
1558             /* S4SUBL */
1559             tmp = tcg_temp_new();
1560             tcg_gen_shli_i64(tmp, va, 2);
1561             tcg_gen_sub_i64(tmp, tmp, vb);
1562             tcg_gen_ext32s_i64(vc, tmp);
1563             tcg_temp_free(tmp);
1564             break;
1565         case 0x0F:
1566             /* CMPBGE */
1567             if (ra == 31) {
1568                 /* Special case 0 >= X as X == 0.  */
1569                 gen_helper_cmpbe0(vc, vb);
1570             } else {
1571                 gen_helper_cmpbge(vc, va, vb);
1572             }
1573             break;
1574         case 0x12:
1575             /* S8ADDL */
1576             tmp = tcg_temp_new();
1577             tcg_gen_shli_i64(tmp, va, 3);
1578             tcg_gen_add_i64(tmp, tmp, vb);
1579             tcg_gen_ext32s_i64(vc, tmp);
1580             tcg_temp_free(tmp);
1581             break;
1582         case 0x1B:
1583             /* S8SUBL */
1584             tmp = tcg_temp_new();
1585             tcg_gen_shli_i64(tmp, va, 3);
1586             tcg_gen_sub_i64(tmp, tmp, vb);
1587             tcg_gen_ext32s_i64(vc, tmp);
1588             tcg_temp_free(tmp);
1589             break;
1590         case 0x1D:
1591             /* CMPULT */
1592             tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1593             break;
1594         case 0x20:
1595             /* ADDQ */
1596             tcg_gen_add_i64(vc, va, vb);
1597             break;
1598         case 0x22:
1599             /* S4ADDQ */
1600             tmp = tcg_temp_new();
1601             tcg_gen_shli_i64(tmp, va, 2);
1602             tcg_gen_add_i64(vc, tmp, vb);
1603             tcg_temp_free(tmp);
1604             break;
1605         case 0x29:
1606             /* SUBQ */
1607             tcg_gen_sub_i64(vc, va, vb);
1608             break;
1609         case 0x2B:
1610             /* S4SUBQ */
1611             tmp = tcg_temp_new();
1612             tcg_gen_shli_i64(tmp, va, 2);
1613             tcg_gen_sub_i64(vc, tmp, vb);
1614             tcg_temp_free(tmp);
1615             break;
1616         case 0x2D:
1617             /* CMPEQ */
1618             tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1619             break;
1620         case 0x32:
1621             /* S8ADDQ */
1622             tmp = tcg_temp_new();
1623             tcg_gen_shli_i64(tmp, va, 3);
1624             tcg_gen_add_i64(vc, tmp, vb);
1625             tcg_temp_free(tmp);
1626             break;
1627         case 0x3B:
1628             /* S8SUBQ */
1629             tmp = tcg_temp_new();
1630             tcg_gen_shli_i64(tmp, va, 3);
1631             tcg_gen_sub_i64(vc, tmp, vb);
1632             tcg_temp_free(tmp);
1633             break;
1634         case 0x3D:
1635             /* CMPULE */
1636             tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1637             break;
1638         case 0x40:
1639             /* ADDL/V */
1640             tmp = tcg_temp_new();
1641             tcg_gen_ext32s_i64(tmp, va);
1642             tcg_gen_ext32s_i64(vc, vb);
1643             tcg_gen_add_i64(tmp, tmp, vc);
1644             tcg_gen_ext32s_i64(vc, tmp);
1645             gen_helper_check_overflow(cpu_env, vc, tmp);
1646             tcg_temp_free(tmp);
1647             break;
1648         case 0x49:
1649             /* SUBL/V */
1650             tmp = tcg_temp_new();
1651             tcg_gen_ext32s_i64(tmp, va);
1652             tcg_gen_ext32s_i64(vc, vb);
1653             tcg_gen_sub_i64(tmp, tmp, vc);
1654             tcg_gen_ext32s_i64(vc, tmp);
1655             gen_helper_check_overflow(cpu_env, vc, tmp);
1656             tcg_temp_free(tmp);
1657             break;
1658         case 0x4D:
1659             /* CMPLT */
1660             tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1661             break;
1662         case 0x60:
1663             /* ADDQ/V */
1664             tmp = tcg_temp_new();
1665             tmp2 = tcg_temp_new();
1666             tcg_gen_eqv_i64(tmp, va, vb);
1667             tcg_gen_mov_i64(tmp2, va);
1668             tcg_gen_add_i64(vc, va, vb);
1669             tcg_gen_xor_i64(tmp2, tmp2, vc);
1670             tcg_gen_and_i64(tmp, tmp, tmp2);
1671             tcg_gen_shri_i64(tmp, tmp, 63);
1672             tcg_gen_movi_i64(tmp2, 0);
1673             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1674             tcg_temp_free(tmp);
1675             tcg_temp_free(tmp2);
1676             break;
1677         case 0x69:
1678             /* SUBQ/V */
1679             tmp = tcg_temp_new();
1680             tmp2 = tcg_temp_new();
1681             tcg_gen_xor_i64(tmp, va, vb);
1682             tcg_gen_mov_i64(tmp2, va);
1683             tcg_gen_sub_i64(vc, va, vb);
1684             tcg_gen_xor_i64(tmp2, tmp2, vc);
1685             tcg_gen_and_i64(tmp, tmp, tmp2);
1686             tcg_gen_shri_i64(tmp, tmp, 63);
1687             tcg_gen_movi_i64(tmp2, 0);
1688             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1689             tcg_temp_free(tmp);
1690             tcg_temp_free(tmp2);
1691             break;
1692         case 0x6D:
1693             /* CMPLE */
1694             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1695             break;
1696         default:
1697             goto invalid_opc;
1698         }
1699         break;
1700 
1701     case 0x11:
1702         if (fn7 == 0x20) {
1703             if (rc == 31) {
1704                 /* Special case BIS as NOP.  */
1705                 break;
1706             }
1707             if (ra == 31) {
1708                 /* Special case BIS as MOV.  */
1709                 vc = dest_gpr(ctx, rc);
1710                 if (islit) {
1711                     tcg_gen_movi_i64(vc, lit);
1712                 } else {
1713                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1714                 }
1715                 break;
1716             }
1717         }
1718 
1719         vc = dest_gpr(ctx, rc);
1720         vb = load_gpr_lit(ctx, rb, lit, islit);
1721 
1722         if (fn7 == 0x28 && ra == 31) {
1723             /* Special case ORNOT as NOT.  */
1724             tcg_gen_not_i64(vc, vb);
1725             break;
1726         }
1727 
1728         va = load_gpr(ctx, ra);
1729         switch (fn7) {
1730         case 0x00:
1731             /* AND */
1732             tcg_gen_and_i64(vc, va, vb);
1733             break;
1734         case 0x08:
1735             /* BIC */
1736             tcg_gen_andc_i64(vc, va, vb);
1737             break;
1738         case 0x14:
1739             /* CMOVLBS */
1740             tmp = tcg_temp_new();
1741             tcg_gen_andi_i64(tmp, va, 1);
1742             tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1743                                 vb, load_gpr(ctx, rc));
1744             tcg_temp_free(tmp);
1745             break;
1746         case 0x16:
1747             /* CMOVLBC */
1748             tmp = tcg_temp_new();
1749             tcg_gen_andi_i64(tmp, va, 1);
1750             tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1751                                 vb, load_gpr(ctx, rc));
1752             tcg_temp_free(tmp);
1753             break;
1754         case 0x20:
1755             /* BIS */
1756             tcg_gen_or_i64(vc, va, vb);
1757             break;
1758         case 0x24:
1759             /* CMOVEQ */
1760             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1761                                 vb, load_gpr(ctx, rc));
1762             break;
1763         case 0x26:
1764             /* CMOVNE */
1765             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1766                                 vb, load_gpr(ctx, rc));
1767             break;
1768         case 0x28:
1769             /* ORNOT */
1770             tcg_gen_orc_i64(vc, va, vb);
1771             break;
1772         case 0x40:
1773             /* XOR */
1774             tcg_gen_xor_i64(vc, va, vb);
1775             break;
1776         case 0x44:
1777             /* CMOVLT */
1778             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1779                                 vb, load_gpr(ctx, rc));
1780             break;
1781         case 0x46:
1782             /* CMOVGE */
1783             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1784                                 vb, load_gpr(ctx, rc));
1785             break;
1786         case 0x48:
1787             /* EQV */
1788             tcg_gen_eqv_i64(vc, va, vb);
1789             break;
1790         case 0x61:
1791             /* AMASK */
1792             REQUIRE_REG_31(ra);
1793             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1794             break;
1795         case 0x64:
1796             /* CMOVLE */
1797             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1798                                 vb, load_gpr(ctx, rc));
1799             break;
1800         case 0x66:
1801             /* CMOVGT */
1802             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1803                                 vb, load_gpr(ctx, rc));
1804             break;
1805         case 0x6C:
1806             /* IMPLVER */
1807             REQUIRE_REG_31(ra);
1808             tcg_gen_movi_i64(vc, ctx->implver);
1809             break;
1810         default:
1811             goto invalid_opc;
1812         }
1813         break;
1814 
1815     case 0x12:
1816         vc = dest_gpr(ctx, rc);
1817         va = load_gpr(ctx, ra);
1818         switch (fn7) {
1819         case 0x02:
1820             /* MSKBL */
1821             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1822             break;
1823         case 0x06:
1824             /* EXTBL */
1825             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1826             break;
1827         case 0x0B:
1828             /* INSBL */
1829             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1830             break;
1831         case 0x12:
1832             /* MSKWL */
1833             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1834             break;
1835         case 0x16:
1836             /* EXTWL */
1837             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1838             break;
1839         case 0x1B:
1840             /* INSWL */
1841             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1842             break;
1843         case 0x22:
1844             /* MSKLL */
1845             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1846             break;
1847         case 0x26:
1848             /* EXTLL */
1849             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1850             break;
1851         case 0x2B:
1852             /* INSLL */
1853             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1854             break;
1855         case 0x30:
1856             /* ZAP */
1857             if (islit) {
1858                 gen_zapnoti(vc, va, ~lit);
1859             } else {
1860                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1861             }
1862             break;
1863         case 0x31:
1864             /* ZAPNOT */
1865             if (islit) {
1866                 gen_zapnoti(vc, va, lit);
1867             } else {
1868                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1869             }
1870             break;
1871         case 0x32:
1872             /* MSKQL */
1873             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1874             break;
1875         case 0x34:
1876             /* SRL */
1877             if (islit) {
1878                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1879             } else {
1880                 tmp = tcg_temp_new();
1881                 vb = load_gpr(ctx, rb);
1882                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1883                 tcg_gen_shr_i64(vc, va, tmp);
1884                 tcg_temp_free(tmp);
1885             }
1886             break;
1887         case 0x36:
1888             /* EXTQL */
1889             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1890             break;
1891         case 0x39:
1892             /* SLL */
1893             if (islit) {
1894                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1895             } else {
1896                 tmp = tcg_temp_new();
1897                 vb = load_gpr(ctx, rb);
1898                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1899                 tcg_gen_shl_i64(vc, va, tmp);
1900                 tcg_temp_free(tmp);
1901             }
1902             break;
1903         case 0x3B:
1904             /* INSQL */
1905             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1906             break;
1907         case 0x3C:
1908             /* SRA */
1909             if (islit) {
1910                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1911             } else {
1912                 tmp = tcg_temp_new();
1913                 vb = load_gpr(ctx, rb);
1914                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1915                 tcg_gen_sar_i64(vc, va, tmp);
1916                 tcg_temp_free(tmp);
1917             }
1918             break;
1919         case 0x52:
1920             /* MSKWH */
1921             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1922             break;
1923         case 0x57:
1924             /* INSWH */
1925             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1926             break;
1927         case 0x5A:
1928             /* EXTWH */
1929             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1930             break;
1931         case 0x62:
1932             /* MSKLH */
1933             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1934             break;
1935         case 0x67:
1936             /* INSLH */
1937             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1938             break;
1939         case 0x6A:
1940             /* EXTLH */
1941             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1942             break;
1943         case 0x72:
1944             /* MSKQH */
1945             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1946             break;
1947         case 0x77:
1948             /* INSQH */
1949             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1950             break;
1951         case 0x7A:
1952             /* EXTQH */
1953             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1954             break;
1955         default:
1956             goto invalid_opc;
1957         }
1958         break;
1959 
1960     case 0x13:
1961         vc = dest_gpr(ctx, rc);
1962         vb = load_gpr_lit(ctx, rb, lit, islit);
1963         va = load_gpr(ctx, ra);
1964         switch (fn7) {
1965         case 0x00:
1966             /* MULL */
1967             tcg_gen_mul_i64(vc, va, vb);
1968             tcg_gen_ext32s_i64(vc, vc);
1969             break;
1970         case 0x20:
1971             /* MULQ */
1972             tcg_gen_mul_i64(vc, va, vb);
1973             break;
1974         case 0x30:
1975             /* UMULH */
1976             tmp = tcg_temp_new();
1977             tcg_gen_mulu2_i64(tmp, vc, va, vb);
1978             tcg_temp_free(tmp);
1979             break;
1980         case 0x40:
1981             /* MULL/V */
1982             tmp = tcg_temp_new();
1983             tcg_gen_ext32s_i64(tmp, va);
1984             tcg_gen_ext32s_i64(vc, vb);
1985             tcg_gen_mul_i64(tmp, tmp, vc);
1986             tcg_gen_ext32s_i64(vc, tmp);
1987             gen_helper_check_overflow(cpu_env, vc, tmp);
1988             tcg_temp_free(tmp);
1989             break;
1990         case 0x60:
1991             /* MULQ/V */
1992             tmp = tcg_temp_new();
1993             tmp2 = tcg_temp_new();
1994             tcg_gen_muls2_i64(vc, tmp, va, vb);
1995             tcg_gen_sari_i64(tmp2, vc, 63);
1996             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1997             tcg_temp_free(tmp);
1998             tcg_temp_free(tmp2);
1999             break;
2000         default:
2001             goto invalid_opc;
2002         }
2003         break;
2004 
2005     case 0x14:
2006         REQUIRE_AMASK(FIX);
2007         vc = dest_fpr(ctx, rc);
2008         switch (fpfn) { /* fn11 & 0x3F */
2009         case 0x04:
2010             /* ITOFS */
2011             REQUIRE_REG_31(rb);
2012             REQUIRE_FEN;
2013             t32 = tcg_temp_new_i32();
2014             va = load_gpr(ctx, ra);
2015             tcg_gen_extrl_i64_i32(t32, va);
2016             gen_helper_memory_to_s(vc, t32);
2017             tcg_temp_free_i32(t32);
2018             break;
2019         case 0x0A:
2020             /* SQRTF */
2021             REQUIRE_REG_31(ra);
2022             REQUIRE_FEN;
2023             vb = load_fpr(ctx, rb);
2024             gen_helper_sqrtf(vc, cpu_env, vb);
2025             break;
2026         case 0x0B:
2027             /* SQRTS */
2028             REQUIRE_REG_31(ra);
2029             REQUIRE_FEN;
2030             gen_sqrts(ctx, rb, rc, fn11);
2031             break;
2032         case 0x14:
2033             /* ITOFF */
2034             REQUIRE_REG_31(rb);
2035             REQUIRE_FEN;
2036             t32 = tcg_temp_new_i32();
2037             va = load_gpr(ctx, ra);
2038             tcg_gen_extrl_i64_i32(t32, va);
2039             gen_helper_memory_to_f(vc, t32);
2040             tcg_temp_free_i32(t32);
2041             break;
2042         case 0x24:
2043             /* ITOFT */
2044             REQUIRE_REG_31(rb);
2045             REQUIRE_FEN;
2046             va = load_gpr(ctx, ra);
2047             tcg_gen_mov_i64(vc, va);
2048             break;
2049         case 0x2A:
2050             /* SQRTG */
2051             REQUIRE_REG_31(ra);
2052             REQUIRE_FEN;
2053             vb = load_fpr(ctx, rb);
2054             gen_helper_sqrtg(vc, cpu_env, vb);
2055             break;
2056         case 0x02B:
2057             /* SQRTT */
2058             REQUIRE_REG_31(ra);
2059             REQUIRE_FEN;
2060             gen_sqrtt(ctx, rb, rc, fn11);
2061             break;
2062         default:
2063             goto invalid_opc;
2064         }
2065         break;
2066 
2067     case 0x15:
2068         /* VAX floating point */
2069         /* XXX: rounding mode and trap are ignored (!) */
2070         vc = dest_fpr(ctx, rc);
2071         vb = load_fpr(ctx, rb);
2072         va = load_fpr(ctx, ra);
2073         switch (fpfn) { /* fn11 & 0x3F */
2074         case 0x00:
2075             /* ADDF */
2076             REQUIRE_FEN;
2077             gen_helper_addf(vc, cpu_env, va, vb);
2078             break;
2079         case 0x01:
2080             /* SUBF */
2081             REQUIRE_FEN;
2082             gen_helper_subf(vc, cpu_env, va, vb);
2083             break;
2084         case 0x02:
2085             /* MULF */
2086             REQUIRE_FEN;
2087             gen_helper_mulf(vc, cpu_env, va, vb);
2088             break;
2089         case 0x03:
2090             /* DIVF */
2091             REQUIRE_FEN;
2092             gen_helper_divf(vc, cpu_env, va, vb);
2093             break;
2094         case 0x1E:
2095             /* CVTDG -- TODO */
2096             REQUIRE_REG_31(ra);
2097             goto invalid_opc;
2098         case 0x20:
2099             /* ADDG */
2100             REQUIRE_FEN;
2101             gen_helper_addg(vc, cpu_env, va, vb);
2102             break;
2103         case 0x21:
2104             /* SUBG */
2105             REQUIRE_FEN;
2106             gen_helper_subg(vc, cpu_env, va, vb);
2107             break;
2108         case 0x22:
2109             /* MULG */
2110             REQUIRE_FEN;
2111             gen_helper_mulg(vc, cpu_env, va, vb);
2112             break;
2113         case 0x23:
2114             /* DIVG */
2115             REQUIRE_FEN;
2116             gen_helper_divg(vc, cpu_env, va, vb);
2117             break;
2118         case 0x25:
2119             /* CMPGEQ */
2120             REQUIRE_FEN;
2121             gen_helper_cmpgeq(vc, cpu_env, va, vb);
2122             break;
2123         case 0x26:
2124             /* CMPGLT */
2125             REQUIRE_FEN;
2126             gen_helper_cmpglt(vc, cpu_env, va, vb);
2127             break;
2128         case 0x27:
2129             /* CMPGLE */
2130             REQUIRE_FEN;
2131             gen_helper_cmpgle(vc, cpu_env, va, vb);
2132             break;
2133         case 0x2C:
2134             /* CVTGF */
2135             REQUIRE_REG_31(ra);
2136             REQUIRE_FEN;
2137             gen_helper_cvtgf(vc, cpu_env, vb);
2138             break;
2139         case 0x2D:
2140             /* CVTGD -- TODO */
2141             REQUIRE_REG_31(ra);
2142             goto invalid_opc;
2143         case 0x2F:
2144             /* CVTGQ */
2145             REQUIRE_REG_31(ra);
2146             REQUIRE_FEN;
2147             gen_helper_cvtgq(vc, cpu_env, vb);
2148             break;
2149         case 0x3C:
2150             /* CVTQF */
2151             REQUIRE_REG_31(ra);
2152             REQUIRE_FEN;
2153             gen_helper_cvtqf(vc, cpu_env, vb);
2154             break;
2155         case 0x3E:
2156             /* CVTQG */
2157             REQUIRE_REG_31(ra);
2158             REQUIRE_FEN;
2159             gen_helper_cvtqg(vc, cpu_env, vb);
2160             break;
2161         default:
2162             goto invalid_opc;
2163         }
2164         break;
2165 
2166     case 0x16:
2167         /* IEEE floating-point */
2168         switch (fpfn) { /* fn11 & 0x3F */
2169         case 0x00:
2170             /* ADDS */
2171             REQUIRE_FEN;
2172             gen_adds(ctx, ra, rb, rc, fn11);
2173             break;
2174         case 0x01:
2175             /* SUBS */
2176             REQUIRE_FEN;
2177             gen_subs(ctx, ra, rb, rc, fn11);
2178             break;
2179         case 0x02:
2180             /* MULS */
2181             REQUIRE_FEN;
2182             gen_muls(ctx, ra, rb, rc, fn11);
2183             break;
2184         case 0x03:
2185             /* DIVS */
2186             REQUIRE_FEN;
2187             gen_divs(ctx, ra, rb, rc, fn11);
2188             break;
2189         case 0x20:
2190             /* ADDT */
2191             REQUIRE_FEN;
2192             gen_addt(ctx, ra, rb, rc, fn11);
2193             break;
2194         case 0x21:
2195             /* SUBT */
2196             REQUIRE_FEN;
2197             gen_subt(ctx, ra, rb, rc, fn11);
2198             break;
2199         case 0x22:
2200             /* MULT */
2201             REQUIRE_FEN;
2202             gen_mult(ctx, ra, rb, rc, fn11);
2203             break;
2204         case 0x23:
2205             /* DIVT */
2206             REQUIRE_FEN;
2207             gen_divt(ctx, ra, rb, rc, fn11);
2208             break;
2209         case 0x24:
2210             /* CMPTUN */
2211             REQUIRE_FEN;
2212             gen_cmptun(ctx, ra, rb, rc, fn11);
2213             break;
2214         case 0x25:
2215             /* CMPTEQ */
2216             REQUIRE_FEN;
2217             gen_cmpteq(ctx, ra, rb, rc, fn11);
2218             break;
2219         case 0x26:
2220             /* CMPTLT */
2221             REQUIRE_FEN;
2222             gen_cmptlt(ctx, ra, rb, rc, fn11);
2223             break;
2224         case 0x27:
2225             /* CMPTLE */
2226             REQUIRE_FEN;
2227             gen_cmptle(ctx, ra, rb, rc, fn11);
2228             break;
2229         case 0x2C:
2230             REQUIRE_REG_31(ra);
2231             REQUIRE_FEN;
2232             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2233                 /* CVTST */
2234                 gen_cvtst(ctx, rb, rc, fn11);
2235             } else {
2236                 /* CVTTS */
2237                 gen_cvtts(ctx, rb, rc, fn11);
2238             }
2239             break;
2240         case 0x2F:
2241             /* CVTTQ */
2242             REQUIRE_REG_31(ra);
2243             REQUIRE_FEN;
2244             gen_cvttq(ctx, rb, rc, fn11);
2245             break;
2246         case 0x3C:
2247             /* CVTQS */
2248             REQUIRE_REG_31(ra);
2249             REQUIRE_FEN;
2250             gen_cvtqs(ctx, rb, rc, fn11);
2251             break;
2252         case 0x3E:
2253             /* CVTQT */
2254             REQUIRE_REG_31(ra);
2255             REQUIRE_FEN;
2256             gen_cvtqt(ctx, rb, rc, fn11);
2257             break;
2258         default:
2259             goto invalid_opc;
2260         }
2261         break;
2262 
2263     case 0x17:
2264         switch (fn11) {
2265         case 0x010:
2266             /* CVTLQ */
2267             REQUIRE_REG_31(ra);
2268             REQUIRE_FEN;
2269             vc = dest_fpr(ctx, rc);
2270             vb = load_fpr(ctx, rb);
2271             gen_cvtlq(vc, vb);
2272             break;
2273         case 0x020:
2274             /* CPYS */
2275             REQUIRE_FEN;
2276             if (rc == 31) {
2277                 /* Special case CPYS as FNOP.  */
2278             } else {
2279                 vc = dest_fpr(ctx, rc);
2280                 va = load_fpr(ctx, ra);
2281                 if (ra == rb) {
2282                     /* Special case CPYS as FMOV.  */
2283                     tcg_gen_mov_i64(vc, va);
2284                 } else {
2285                     vb = load_fpr(ctx, rb);
2286                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2287                 }
2288             }
2289             break;
2290         case 0x021:
2291             /* CPYSN */
2292             REQUIRE_FEN;
2293             vc = dest_fpr(ctx, rc);
2294             vb = load_fpr(ctx, rb);
2295             va = load_fpr(ctx, ra);
2296             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2297             break;
2298         case 0x022:
2299             /* CPYSE */
2300             REQUIRE_FEN;
2301             vc = dest_fpr(ctx, rc);
2302             vb = load_fpr(ctx, rb);
2303             va = load_fpr(ctx, ra);
2304             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2305             break;
2306         case 0x024:
2307             /* MT_FPCR */
2308             REQUIRE_FEN;
2309             va = load_fpr(ctx, ra);
2310             gen_helper_store_fpcr(cpu_env, va);
2311             if (ctx->tb_rm == QUAL_RM_D) {
2312                 /* Re-do the copy of the rounding mode to fp_status
2313                    the next time we use dynamic rounding.  */
2314                 ctx->tb_rm = -1;
2315             }
2316             break;
2317         case 0x025:
2318             /* MF_FPCR */
2319             REQUIRE_FEN;
2320             va = dest_fpr(ctx, ra);
2321             gen_helper_load_fpcr(va, cpu_env);
2322             break;
2323         case 0x02A:
2324             /* FCMOVEQ */
2325             REQUIRE_FEN;
2326             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2327             break;
2328         case 0x02B:
2329             /* FCMOVNE */
2330             REQUIRE_FEN;
2331             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2332             break;
2333         case 0x02C:
2334             /* FCMOVLT */
2335             REQUIRE_FEN;
2336             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2337             break;
2338         case 0x02D:
2339             /* FCMOVGE */
2340             REQUIRE_FEN;
2341             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2342             break;
2343         case 0x02E:
2344             /* FCMOVLE */
2345             REQUIRE_FEN;
2346             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2347             break;
2348         case 0x02F:
2349             /* FCMOVGT */
2350             REQUIRE_FEN;
2351             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2352             break;
2353         case 0x030: /* CVTQL */
2354         case 0x130: /* CVTQL/V */
2355         case 0x530: /* CVTQL/SV */
2356             REQUIRE_REG_31(ra);
2357             REQUIRE_FEN;
2358             vc = dest_fpr(ctx, rc);
2359             vb = load_fpr(ctx, rb);
2360             gen_helper_cvtql(vc, cpu_env, vb);
2361             gen_fp_exc_raise(rc, fn11);
2362             break;
2363         default:
2364             goto invalid_opc;
2365         }
2366         break;
2367 
2368     case 0x18:
2369         switch ((uint16_t)disp16) {
2370         case 0x0000:
2371             /* TRAPB */
2372             /* No-op.  */
2373             break;
2374         case 0x0400:
2375             /* EXCB */
2376             /* No-op.  */
2377             break;
2378         case 0x4000:
2379             /* MB */
2380             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2381             break;
2382         case 0x4400:
2383             /* WMB */
2384             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2385             break;
2386         case 0x8000:
2387             /* FETCH */
2388             /* No-op */
2389             break;
2390         case 0xA000:
2391             /* FETCH_M */
2392             /* No-op */
2393             break;
2394         case 0xC000:
2395             /* RPCC */
2396             va = dest_gpr(ctx, ra);
2397             if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2398                 gen_io_start();
2399                 gen_helper_load_pcc(va, cpu_env);
2400                 ret = DISAS_PC_STALE;
2401             } else {
2402                 gen_helper_load_pcc(va, cpu_env);
2403             }
2404             break;
2405         case 0xE000:
2406             /* RC */
2407             gen_rx(ctx, ra, 0);
2408             break;
2409         case 0xE800:
2410             /* ECB */
2411             break;
2412         case 0xF000:
2413             /* RS */
2414             gen_rx(ctx, ra, 1);
2415             break;
2416         case 0xF800:
2417             /* WH64 */
2418             /* No-op */
2419             break;
2420         case 0xFC00:
2421             /* WH64EN */
2422             /* No-op */
2423             break;
2424         default:
2425             goto invalid_opc;
2426         }
2427         break;
2428 
2429     case 0x19:
2430         /* HW_MFPR (PALcode) */
2431 #ifndef CONFIG_USER_ONLY
2432         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2433         va = dest_gpr(ctx, ra);
2434         ret = gen_mfpr(ctx, va, insn & 0xffff);
2435         break;
2436 #else
2437         goto invalid_opc;
2438 #endif
2439 
2440     case 0x1A:
2441         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2442            prediction stack action, which of course we don't implement.  */
2443         vb = load_gpr(ctx, rb);
2444         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2445         if (ra != 31) {
2446             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2447         }
2448         ret = DISAS_PC_UPDATED;
2449         break;
2450 
2451     case 0x1B:
2452         /* HW_LD (PALcode) */
2453 #ifndef CONFIG_USER_ONLY
2454         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2455         {
2456             TCGv addr = tcg_temp_new();
2457             vb = load_gpr(ctx, rb);
2458             va = dest_gpr(ctx, ra);
2459 
2460             tcg_gen_addi_i64(addr, vb, disp12);
2461             switch ((insn >> 12) & 0xF) {
2462             case 0x0:
2463                 /* Longword physical access (hw_ldl/p) */
2464                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2465                 break;
2466             case 0x1:
2467                 /* Quadword physical access (hw_ldq/p) */
2468                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2469                 break;
2470             case 0x2:
2471                 /* Longword physical access with lock (hw_ldl_l/p) */
2472                 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2473                 break;
2474             case 0x3:
2475                 /* Quadword physical access with lock (hw_ldq_l/p) */
2476                 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2477                 break;
2478             case 0x4:
2479                 /* Longword virtual PTE fetch (hw_ldl/v) */
2480                 goto invalid_opc;
2481             case 0x5:
2482                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2483                 goto invalid_opc;
2484                 break;
2485             case 0x6:
2486                 /* Invalid */
2487                 goto invalid_opc;
2488             case 0x7:
2489             /* Invalid */
2490                 goto invalid_opc;
2491             case 0x8:
2492                 /* Longword virtual access (hw_ldl) */
2493                 goto invalid_opc;
2494             case 0x9:
2495                 /* Quadword virtual access (hw_ldq) */
2496                 goto invalid_opc;
2497             case 0xA:
2498                 /* Longword virtual access with protection check (hw_ldl/w) */
2499                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2500                 break;
2501             case 0xB:
2502                 /* Quadword virtual access with protection check (hw_ldq/w) */
2503                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2504                 break;
2505             case 0xC:
2506                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2507                 goto invalid_opc;
2508             case 0xD:
2509                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2510                 goto invalid_opc;
2511             case 0xE:
2512                 /* Longword virtual access with alternate access mode and
2513                    protection checks (hw_ldl/wa) */
2514                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2515                 break;
2516             case 0xF:
2517                 /* Quadword virtual access with alternate access mode and
2518                    protection checks (hw_ldq/wa) */
2519                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2520                 break;
2521             }
2522             tcg_temp_free(addr);
2523             break;
2524         }
2525 #else
2526         goto invalid_opc;
2527 #endif
2528 
2529     case 0x1C:
2530         vc = dest_gpr(ctx, rc);
2531         if (fn7 == 0x70) {
2532             /* FTOIT */
2533             REQUIRE_AMASK(FIX);
2534             REQUIRE_REG_31(rb);
2535             va = load_fpr(ctx, ra);
2536             tcg_gen_mov_i64(vc, va);
2537             break;
2538         } else if (fn7 == 0x78) {
2539             /* FTOIS */
2540             REQUIRE_AMASK(FIX);
2541             REQUIRE_REG_31(rb);
2542             t32 = tcg_temp_new_i32();
2543             va = load_fpr(ctx, ra);
2544             gen_helper_s_to_memory(t32, va);
2545             tcg_gen_ext_i32_i64(vc, t32);
2546             tcg_temp_free_i32(t32);
2547             break;
2548         }
2549 
2550         vb = load_gpr_lit(ctx, rb, lit, islit);
2551         switch (fn7) {
2552         case 0x00:
2553             /* SEXTB */
2554             REQUIRE_AMASK(BWX);
2555             REQUIRE_REG_31(ra);
2556             tcg_gen_ext8s_i64(vc, vb);
2557             break;
2558         case 0x01:
2559             /* SEXTW */
2560             REQUIRE_AMASK(BWX);
2561             REQUIRE_REG_31(ra);
2562             tcg_gen_ext16s_i64(vc, vb);
2563             break;
2564         case 0x30:
2565             /* CTPOP */
2566             REQUIRE_AMASK(CIX);
2567             REQUIRE_REG_31(ra);
2568             REQUIRE_NO_LIT;
2569             tcg_gen_ctpop_i64(vc, vb);
2570             break;
2571         case 0x31:
2572             /* PERR */
2573             REQUIRE_AMASK(MVI);
2574             REQUIRE_NO_LIT;
2575             va = load_gpr(ctx, ra);
2576             gen_helper_perr(vc, va, vb);
2577             break;
2578         case 0x32:
2579             /* CTLZ */
2580             REQUIRE_AMASK(CIX);
2581             REQUIRE_REG_31(ra);
2582             REQUIRE_NO_LIT;
2583             tcg_gen_clzi_i64(vc, vb, 64);
2584             break;
2585         case 0x33:
2586             /* CTTZ */
2587             REQUIRE_AMASK(CIX);
2588             REQUIRE_REG_31(ra);
2589             REQUIRE_NO_LIT;
2590             tcg_gen_ctzi_i64(vc, vb, 64);
2591             break;
2592         case 0x34:
2593             /* UNPKBW */
2594             REQUIRE_AMASK(MVI);
2595             REQUIRE_REG_31(ra);
2596             REQUIRE_NO_LIT;
2597             gen_helper_unpkbw(vc, vb);
2598             break;
2599         case 0x35:
2600             /* UNPKBL */
2601             REQUIRE_AMASK(MVI);
2602             REQUIRE_REG_31(ra);
2603             REQUIRE_NO_LIT;
2604             gen_helper_unpkbl(vc, vb);
2605             break;
2606         case 0x36:
2607             /* PKWB */
2608             REQUIRE_AMASK(MVI);
2609             REQUIRE_REG_31(ra);
2610             REQUIRE_NO_LIT;
2611             gen_helper_pkwb(vc, vb);
2612             break;
2613         case 0x37:
2614             /* PKLB */
2615             REQUIRE_AMASK(MVI);
2616             REQUIRE_REG_31(ra);
2617             REQUIRE_NO_LIT;
2618             gen_helper_pklb(vc, vb);
2619             break;
2620         case 0x38:
2621             /* MINSB8 */
2622             REQUIRE_AMASK(MVI);
2623             va = load_gpr(ctx, ra);
2624             gen_helper_minsb8(vc, va, vb);
2625             break;
2626         case 0x39:
2627             /* MINSW4 */
2628             REQUIRE_AMASK(MVI);
2629             va = load_gpr(ctx, ra);
2630             gen_helper_minsw4(vc, va, vb);
2631             break;
2632         case 0x3A:
2633             /* MINUB8 */
2634             REQUIRE_AMASK(MVI);
2635             va = load_gpr(ctx, ra);
2636             gen_helper_minub8(vc, va, vb);
2637             break;
2638         case 0x3B:
2639             /* MINUW4 */
2640             REQUIRE_AMASK(MVI);
2641             va = load_gpr(ctx, ra);
2642             gen_helper_minuw4(vc, va, vb);
2643             break;
2644         case 0x3C:
2645             /* MAXUB8 */
2646             REQUIRE_AMASK(MVI);
2647             va = load_gpr(ctx, ra);
2648             gen_helper_maxub8(vc, va, vb);
2649             break;
2650         case 0x3D:
2651             /* MAXUW4 */
2652             REQUIRE_AMASK(MVI);
2653             va = load_gpr(ctx, ra);
2654             gen_helper_maxuw4(vc, va, vb);
2655             break;
2656         case 0x3E:
2657             /* MAXSB8 */
2658             REQUIRE_AMASK(MVI);
2659             va = load_gpr(ctx, ra);
2660             gen_helper_maxsb8(vc, va, vb);
2661             break;
2662         case 0x3F:
2663             /* MAXSW4 */
2664             REQUIRE_AMASK(MVI);
2665             va = load_gpr(ctx, ra);
2666             gen_helper_maxsw4(vc, va, vb);
2667             break;
2668         default:
2669             goto invalid_opc;
2670         }
2671         break;
2672 
2673     case 0x1D:
2674         /* HW_MTPR (PALcode) */
2675 #ifndef CONFIG_USER_ONLY
2676         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2677         vb = load_gpr(ctx, rb);
2678         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2679         break;
2680 #else
2681         goto invalid_opc;
2682 #endif
2683 
2684     case 0x1E:
2685         /* HW_RET (PALcode) */
2686 #ifndef CONFIG_USER_ONLY
2687         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2688         if (rb == 31) {
2689             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2690                address from EXC_ADDR.  This turns out to be useful for our
2691                emulation PALcode, so continue to accept it.  */
2692             vb = dest_sink(ctx);
2693             tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2694         } else {
2695             vb = load_gpr(ctx, rb);
2696         }
2697         tcg_gen_movi_i64(cpu_lock_addr, -1);
2698         st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2699         tmp = tcg_temp_new();
2700         tcg_gen_andi_i64(tmp, vb, 1);
2701         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2702         tcg_temp_free(tmp);
2703         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2704         /* Allow interrupts to be recognized right away.  */
2705         ret = DISAS_PC_UPDATED_NOCHAIN;
2706         break;
2707 #else
2708         goto invalid_opc;
2709 #endif
2710 
2711     case 0x1F:
2712         /* HW_ST (PALcode) */
2713 #ifndef CONFIG_USER_ONLY
2714         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2715         {
2716             switch ((insn >> 12) & 0xF) {
2717             case 0x0:
2718                 /* Longword physical access */
2719                 va = load_gpr(ctx, ra);
2720                 vb = load_gpr(ctx, rb);
2721                 tmp = tcg_temp_new();
2722                 tcg_gen_addi_i64(tmp, vb, disp12);
2723                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2724                 tcg_temp_free(tmp);
2725                 break;
2726             case 0x1:
2727                 /* Quadword physical access */
2728                 va = load_gpr(ctx, ra);
2729                 vb = load_gpr(ctx, rb);
2730                 tmp = tcg_temp_new();
2731                 tcg_gen_addi_i64(tmp, vb, disp12);
2732                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2733                 tcg_temp_free(tmp);
2734                 break;
2735             case 0x2:
2736                 /* Longword physical access with lock */
2737                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2738                                             MMU_PHYS_IDX, MO_LESL);
2739                 break;
2740             case 0x3:
2741                 /* Quadword physical access with lock */
2742                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2743                                             MMU_PHYS_IDX, MO_LEQ);
2744                 break;
2745             case 0x4:
2746                 /* Longword virtual access */
2747                 goto invalid_opc;
2748             case 0x5:
2749                 /* Quadword virtual access */
2750                 goto invalid_opc;
2751             case 0x6:
2752                 /* Invalid */
2753                 goto invalid_opc;
2754             case 0x7:
2755                 /* Invalid */
2756                 goto invalid_opc;
2757             case 0x8:
2758                 /* Invalid */
2759                 goto invalid_opc;
2760             case 0x9:
2761                 /* Invalid */
2762                 goto invalid_opc;
2763             case 0xA:
2764                 /* Invalid */
2765                 goto invalid_opc;
2766             case 0xB:
2767                 /* Invalid */
2768                 goto invalid_opc;
2769             case 0xC:
2770                 /* Longword virtual access with alternate access mode */
2771                 goto invalid_opc;
2772             case 0xD:
2773                 /* Quadword virtual access with alternate access mode */
2774                 goto invalid_opc;
2775             case 0xE:
2776                 /* Invalid */
2777                 goto invalid_opc;
2778             case 0xF:
2779                 /* Invalid */
2780                 goto invalid_opc;
2781             }
2782             break;
2783         }
2784 #else
2785         goto invalid_opc;
2786 #endif
2787     case 0x20:
2788         /* LDF */
2789         REQUIRE_FEN;
2790         gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2791         break;
2792     case 0x21:
2793         /* LDG */
2794         REQUIRE_FEN;
2795         gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2796         break;
2797     case 0x22:
2798         /* LDS */
2799         REQUIRE_FEN;
2800         gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2801         break;
2802     case 0x23:
2803         /* LDT */
2804         REQUIRE_FEN;
2805         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2806         break;
2807     case 0x24:
2808         /* STF */
2809         REQUIRE_FEN;
2810         gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2811         break;
2812     case 0x25:
2813         /* STG */
2814         REQUIRE_FEN;
2815         gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2816         break;
2817     case 0x26:
2818         /* STS */
2819         REQUIRE_FEN;
2820         gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2821         break;
2822     case 0x27:
2823         /* STT */
2824         REQUIRE_FEN;
2825         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2826         break;
2827     case 0x28:
2828         /* LDL */
2829         gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2830         break;
2831     case 0x29:
2832         /* LDQ */
2833         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2834         break;
2835     case 0x2A:
2836         /* LDL_L */
2837         gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2838         break;
2839     case 0x2B:
2840         /* LDQ_L */
2841         gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2842         break;
2843     case 0x2C:
2844         /* STL */
2845         gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2846         break;
2847     case 0x2D:
2848         /* STQ */
2849         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2850         break;
2851     case 0x2E:
2852         /* STL_C */
2853         ret = gen_store_conditional(ctx, ra, rb, disp16,
2854                                     ctx->mem_idx, MO_LESL);
2855         break;
2856     case 0x2F:
2857         /* STQ_C */
2858         ret = gen_store_conditional(ctx, ra, rb, disp16,
2859                                     ctx->mem_idx, MO_LEQ);
2860         break;
2861     case 0x30:
2862         /* BR */
2863         ret = gen_bdirect(ctx, ra, disp21);
2864         break;
2865     case 0x31: /* FBEQ */
2866         REQUIRE_FEN;
2867         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2868         break;
2869     case 0x32: /* FBLT */
2870         REQUIRE_FEN;
2871         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2872         break;
2873     case 0x33: /* FBLE */
2874         REQUIRE_FEN;
2875         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2876         break;
2877     case 0x34:
2878         /* BSR */
2879         ret = gen_bdirect(ctx, ra, disp21);
2880         break;
2881     case 0x35: /* FBNE */
2882         REQUIRE_FEN;
2883         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2884         break;
2885     case 0x36: /* FBGE */
2886         REQUIRE_FEN;
2887         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2888         break;
2889     case 0x37: /* FBGT */
2890         REQUIRE_FEN;
2891         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2892         break;
2893     case 0x38:
2894         /* BLBC */
2895         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2896         break;
2897     case 0x39:
2898         /* BEQ */
2899         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2900         break;
2901     case 0x3A:
2902         /* BLT */
2903         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2904         break;
2905     case 0x3B:
2906         /* BLE */
2907         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2908         break;
2909     case 0x3C:
2910         /* BLBS */
2911         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2912         break;
2913     case 0x3D:
2914         /* BNE */
2915         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2916         break;
2917     case 0x3E:
2918         /* BGE */
2919         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2920         break;
2921     case 0x3F:
2922         /* BGT */
2923         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2924         break;
2925     invalid_opc:
2926         ret = gen_invalid(ctx);
2927         break;
2928     raise_fen:
2929         ret = gen_excp(ctx, EXCP_FEN, 0);
2930         break;
2931     }
2932 
2933     return ret;
2934 }
2935 
2936 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2937 {
2938     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2939     CPUAlphaState *env = cpu->env_ptr;
2940     int64_t bound;
2941 
2942     ctx->tbflags = ctx->base.tb->flags;
2943     ctx->mem_idx = cpu_mmu_index(env, false);
2944     ctx->implver = env->implver;
2945     ctx->amask = env->amask;
2946 
2947 #ifdef CONFIG_USER_ONLY
2948     ctx->ir = cpu_std_ir;
2949 #else
2950     ctx->palbr = env->palbr;
2951     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2952 #endif
2953 
2954     /* ??? Every TB begins with unset rounding mode, to be initialized on
2955        the first fp insn of the TB.  Alternately we could define a proper
2956        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2957        to reset the FP_STATUS to that default at the end of any TB that
2958        changes the default.  We could even (gasp) dynamiclly figure out
2959        what default would be most efficient given the running program.  */
2960     ctx->tb_rm = -1;
2961     /* Similarly for flush-to-zero.  */
2962     ctx->tb_ftz = -1;
2963 
2964     ctx->zero = NULL;
2965     ctx->sink = NULL;
2966 
2967     /* Bound the number of insns to execute to those left on the page.  */
2968     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2969     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2970 }
2971 
/* TranslatorOps hook invoked at the start of each TB; Alpha needs no
   per-TB prologue, so this is intentionally empty.  */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2975 
/* Record the guest PC of the insn about to be translated.  This is the
   value restore_state_to_opc later reads back as data[0] to recover
   env->pc after an exception.  */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2980 
/* Handle a guest breakpoint at the current PC: emit an EXCP_DEBUG
   exception in place of the insn and tell the translator loop the
   breakpoint was consumed (return true ends the TB here).  */
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
2995 
2996 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2997 {
2998     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2999     CPUAlphaState *env = cpu->env_ptr;
3000     uint32_t insn = translator_ldl(env, ctx->base.pc_next);
3001 
3002     ctx->base.pc_next += 4;
3003     ctx->base.is_jmp = translate_one(ctx, insn);
3004 
3005     free_context_temps(ctx);
3006     translator_loop_temp_check(&ctx->base);
3007 }
3008 
/* Emit the TB epilogue according to how translation ended.  The cases
   deliberately fall through: each adds the work its predecessor still
   needs (store PC, then chain or exit), so the order is significant.  */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* Exception already raised; nothing more to emit.  */
        break;
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB: try a direct goto_tb link to the
           next PC, else fall through to the stale-PC handling.  */
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        /* cpu_pc does not yet hold the next PC; store it now.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* cpu_pc is valid; chain via the TB lookup unless single-stepping.  */
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        /* Must return to the main loop; raise EXCP_DEBUG when
           single-stepping so the debugger regains control.  */
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
3043 
3044 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3045 {
3046     qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3047     log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3048 }
3049 
/* Callback table handed to the generic translator loop.  */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
3059 
/* Target entry point for TB translation: run the generic translator
   loop with the Alpha callback table and a stack-allocated context.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
}
3065 
/* Restore CPU state from the insn_start data recorded during
   translation: data[0] is the guest PC emitted by alpha_tr_insn_start.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
3071