/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "trace-tcg.h"
#include "exec/translator.h"
#include "exec/log.h"


#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}

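/* Worked example (assuming the ENV_FLAG_*_SHIFT layout from cpu.h, where
   e.g. ENV_FLAG_PS_SHIFT is 8): get_flag_ofs(8) addresses byte 1 of the
   32-bit flags word on a little-endian host and byte 2 on a big-endian
   host, so ld_flag_byte and st_flag_byte below touch the same logical
   field either way.  */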
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}

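/* Emulate STL_C/STQ_C with a compare-and-swap against the value saved by
   the last LDx_L: the store is performed only if memory still holds
   cpu_lock_value, and ra receives 1 on success or 0 on failure.  As with
   most cmpxchg-based LL/SC emulations, an ABA change of the location
   between the load and the store is not detected.  */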
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}

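/* Example: the kseg base 0xfffffc0000000000 is all-ones above the virtual
   address space width and has 2 in the region field <42:41>, so it passes
   both tests below.  */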
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}

static bool use_exit_tb(DisasContext *ctx)
{
    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    return true;
#endif
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}

/* Fold -0.0 for comparison with COND.  */

550 {
551     uint64_t mzero = 1ull << 63;
552 
553     switch (cond) {
554     case TCG_COND_LE:
555     case TCG_COND_GT:
556         /* For <= or >, the -0.0 value directly compares the way we want.  */
557         tcg_gen_mov_i64(dest, src);
558         break;
559 
560     case TCG_COND_EQ:
561     case TCG_COND_NE:
562         /* For == or !=, we can simply mask off the sign bit and compare.  */
563         tcg_gen_andi_i64(dest, src, mzero - 1);
564         break;
565 
566     case TCG_COND_GE:
567     case TCG_COND_LT:
568         /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
569         tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
570         tcg_gen_neg_i64(dest, dest);
571         tcg_gen_and_i64(dest, dest, src);
572         break;
573 
574     default:
575         abort();
576     }
577 }
578 
579 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
580                                 int32_t disp)
581 {
582     TCGv cmp_tmp = tcg_temp_new();
583     DisasJumpType ret;
584 
585     gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
586     ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
587     tcg_temp_free(cmp_tmp);
588     return ret;
589 }
590 
591 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
592 {
593     TCGv_i64 va, vb, z;
594 
595     z = load_zero(ctx);
596     vb = load_fpr(ctx, rb);
597     va = tcg_temp_new();
598     gen_fold_mzero(cond, va, load_fpr(ctx, ra));
599 
600     tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
601 
602     tcg_temp_free(va);
603 }
604 
605 #define QUAL_RM_N       0x080   /* Round mode nearest even */
606 #define QUAL_RM_C       0x000   /* Round mode chopped */
607 #define QUAL_RM_M       0x040   /* Round mode minus infinity */
608 #define QUAL_RM_D       0x0c0   /* Round mode dynamic */
609 #define QUAL_RM_MASK    0x0c0
610 
611 #define QUAL_U          0x100   /* Underflow enable (fp output) */
612 #define QUAL_V          0x100   /* Overflow enable (int output) */
613 #define QUAL_S          0x400   /* Software completion enable */
614 #define QUAL_I          0x200   /* Inexact detection enable */
615 
616 static void gen_qual_roundmode(DisasContext *ctx, int fn11)
617 {
618     TCGv_i32 tmp;
619 
620     fn11 &= QUAL_RM_MASK;
621     if (fn11 == ctx->tb_rm) {
622         return;
623     }
624     ctx->tb_rm = fn11;
625 
626     tmp = tcg_temp_new_i32();
627     switch (fn11) {
628     case QUAL_RM_N:
629         tcg_gen_movi_i32(tmp, float_round_nearest_even);
630         break;
631     case QUAL_RM_C:
632         tcg_gen_movi_i32(tmp, float_round_to_zero);
633         break;
634     case QUAL_RM_M:
635         tcg_gen_movi_i32(tmp, float_round_down);
636         break;
637     case QUAL_RM_D:
638         tcg_gen_ld8u_i32(tmp, cpu_env,
639                          offsetof(CPUAlphaState, fpcr_dyn_round));
640         break;
641     }
642 
643 #if defined(CONFIG_SOFTFLOAT_INLINE)
644     /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
645        With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
646        sets the one field.  */
647     tcg_gen_st8_i32(tmp, cpu_env,
648                     offsetof(CPUAlphaState, fp_status.float_rounding_mode));
649 #else
650     gen_helper_setroundmode(tmp);
651 #endif
652 
653     tcg_temp_free_i32(tmp);
654 }
655 
656 static void gen_qual_flushzero(DisasContext *ctx, int fn11)
657 {
658     TCGv_i32 tmp;
659 
660     fn11 &= QUAL_U;
661     if (fn11 == ctx->tb_ftz) {
662         return;
663     }
664     ctx->tb_ftz = fn11;
665 
666     tmp = tcg_temp_new_i32();
667     if (fn11) {
668         /* Underflow is enabled, use the FPCR setting.  */
669         tcg_gen_ld8u_i32(tmp, cpu_env,
670                          offsetof(CPUAlphaState, fpcr_flush_to_zero));
671     } else {
672         /* Underflow is disabled, force flush-to-zero.  */
673         tcg_gen_movi_i32(tmp, 1);
674     }
675 
676 #if defined(CONFIG_SOFTFLOAT_INLINE)
677     tcg_gen_st8_i32(tmp, cpu_env,
678                     offsetof(CPUAlphaState, fp_status.flush_to_zero));
679 #else
680     gen_helper_setflushzero(tmp);
681 #endif
682 
683     tcg_temp_free_i32(tmp);
684 }
685 
686 static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
687 {
688     TCGv val;
689 
690     if (unlikely(reg == 31)) {
691         val = load_zero(ctx);
692     } else {
693         val = cpu_fir[reg];
694         if ((fn11 & QUAL_S) == 0) {
695             if (is_cmp) {
696                 gen_helper_ieee_input_cmp(cpu_env, val);
697             } else {
698                 gen_helper_ieee_input(cpu_env, val);
699             }
700         } else {
701 #ifndef CONFIG_USER_ONLY
702             /* In system mode, raise exceptions for denormals like real
703                hardware.  In user mode, proceed as if the OS completion
704                handler is handling the denormal as per spec.  */
705             gen_helper_ieee_input_s(cpu_env, val);
706 #endif
707         }
708     }
709     return val;
710 }
711 
712 static void gen_fp_exc_raise(int rc, int fn11)
713 {
714     /* ??? We ought to be able to do something with imprecise exceptions.
715        E.g. notice we're still in the trap shadow of something within the
716        TB and do not generate the code to signal the exception; end the TB
717        when an exception is forced to arrive, either by consumption of a
718        register value or TRAPB or EXCB.  */
719     TCGv_i32 reg, ign;
720     uint32_t ignore = 0;
721 
722     if (!(fn11 & QUAL_U)) {
723         /* Note that QUAL_U == QUAL_V, so ignore either.  */
724         ignore |= FPCR_UNF | FPCR_IOV;
725     }
726     if (!(fn11 & QUAL_I)) {
727         ignore |= FPCR_INE;
728     }
729     ign = tcg_const_i32(ignore);
730 
731     /* ??? Pass in the regno of the destination so that the helper can
732        set EXC_MASK, which contains a bitmask of destination registers
733        that have caused arithmetic traps.  A simple userspace emulation
734        does not require this.  We do need it for a guest kernel's entArith,
735        or if we were to do something clever with imprecise exceptions.  */
736     reg = tcg_const_i32(rc + 32);
737     if (fn11 & QUAL_S) {
738         gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
739     } else {
740         gen_helper_fp_exc_raise(cpu_env, ign, reg);
741     }
742 
743     tcg_temp_free_i32(reg);
744     tcg_temp_free_i32(ign);
745 }
746 
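/* As can be read off the shifts below, the longword sits in the FP
   register with X<31> at bit 63, X<30> at bit 62 and X<29:0> at bits
   <58:29>; the code reassembles that into a sign-extended 64-bit
   integer.  */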
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

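/* Expand the 8-bit ZAPNOT byte-select literal into a 64-bit byte mask.
   E.g. lit 0x0f yields 0x00000000ffffffff and lit 0x80 yields
   0xff00000000000000.  */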
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
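/* E.g. EXTWH (byte_mask 0x03) with immediate rb = 7: pos = (64 - 56) & 63
   = 8 and len = 16, so va<7:0> is deposited at vc<15:8> with all other
   bits zeroed.  */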
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
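/* E.g. EXTWL (byte_mask 0x03) with immediate rb = 6 extracts the word at
   the top of the register: pos = 48, len = 16.  */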
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

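/* RC/RS: copy the current RX interrupt flag into ra (unless ra is $31),
   then unconditionally clear or set it.  */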
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    TCGv tmp;

    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    tmp = tcg_const_i64(set);
    st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
    tcg_temp_free(tmp);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But be sure to store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

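        /* PALcode entry points sit at fixed offsets from PALBR: privileged
           calls at 0x1000 + index * 64, unprivileged ones at 0x2000.
           E.g. CALL_PAL 0x83 (callsys under OSF/1) vectors to
           palbr + 0x2000 + 3 * 64.  */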
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}

#ifndef CONFIG_USER_ONLY

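/* PR_LONG marks a processor-register offset as a 32-bit field, so the
   mfpr/mtpr paths below use 32-bit loads and stores for it.  */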
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (use_icount) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}
#endif /* !USER_ONLY */

#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
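    /* Operate-format fields: opc<31:26>, ra<25:21>, rb<20:16>, islit<12>,
       lit<20:13>, fn11<15:5>, fn7<11:5>, rc<4:0>; the memory and branch
       formats reuse the low bits as disp16<15:0> and disp21<20:0>.  */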
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
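            /* Signed overflow occurred iff the operands have the same
               sign and the sum's sign differs from it:
               (~(va ^ vb) & (va ^ vc)) has bit 63 set.  */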
1715             tmp = tcg_temp_new();
1716             tmp2 = tcg_temp_new();
1717             tcg_gen_eqv_i64(tmp, va, vb);
1718             tcg_gen_mov_i64(tmp2, va);
1719             tcg_gen_add_i64(vc, va, vb);
1720             tcg_gen_xor_i64(tmp2, tmp2, vc);
1721             tcg_gen_and_i64(tmp, tmp, tmp2);
1722             tcg_gen_shri_i64(tmp, tmp, 63);
1723             tcg_gen_movi_i64(tmp2, 0);
1724             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1725             tcg_temp_free(tmp);
1726             tcg_temp_free(tmp2);
1727             break;
1728         case 0x69:
1729             /* SUBQ/V */
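            /* For subtraction, overflow requires operands of differing
               sign and a result whose sign differs from va, i.e. bit 63
               of (va ^ vb) & (va ^ vc).  */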
1730             tmp = tcg_temp_new();
1731             tmp2 = tcg_temp_new();
1732             tcg_gen_xor_i64(tmp, va, vb);
1733             tcg_gen_mov_i64(tmp2, va);
1734             tcg_gen_sub_i64(vc, va, vb);
1735             tcg_gen_xor_i64(tmp2, tmp2, vc);
1736             tcg_gen_and_i64(tmp, tmp, tmp2);
1737             tcg_gen_shri_i64(tmp, tmp, 63);
1738             tcg_gen_movi_i64(tmp2, 0);
1739             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1740             tcg_temp_free(tmp);
1741             tcg_temp_free(tmp2);
1742             break;
1743         case 0x6D:
1744             /* CMPLE */
1745             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1746             break;
1747         default:
1748             goto invalid_opc;
1749         }
1750         break;
1751 
1752     case 0x11:
1753         if (fn7 == 0x20) {
1754             if (rc == 31) {
1755                 /* Special case BIS as NOP.  */
1756                 break;
1757             }
1758             if (ra == 31) {
1759                 /* Special case BIS as MOV.  */
1760                 vc = dest_gpr(ctx, rc);
1761                 if (islit) {
1762                     tcg_gen_movi_i64(vc, lit);
1763                 } else {
1764                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1765                 }
1766                 break;
1767             }
1768         }
1769 
1770         vc = dest_gpr(ctx, rc);
1771         vb = load_gpr_lit(ctx, rb, lit, islit);
1772 
1773         if (fn7 == 0x28 && ra == 31) {
1774             /* Special case ORNOT as NOT.  */
1775             tcg_gen_not_i64(vc, vb);
1776             break;
1777         }
1778 
1779         va = load_gpr(ctx, ra);
1780         switch (fn7) {
1781         case 0x00:
1782             /* AND */
1783             tcg_gen_and_i64(vc, va, vb);
1784             break;
1785         case 0x08:
1786             /* BIC */
1787             tcg_gen_andc_i64(vc, va, vb);
1788             break;
1789         case 0x14:
1790             /* CMOVLBS */
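            /* Conditional moves must leave rc unchanged when the test
               fails, so the old value of rc is read back as the "else"
               operand of the movcond (likewise for the CMOVxx cases
               below).  */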
1791             tmp = tcg_temp_new();
1792             tcg_gen_andi_i64(tmp, va, 1);
1793             tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1794                                 vb, load_gpr(ctx, rc));
1795             tcg_temp_free(tmp);
1796             break;
1797         case 0x16:
1798             /* CMOVLBC */
1799             tmp = tcg_temp_new();
1800             tcg_gen_andi_i64(tmp, va, 1);
1801             tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1802                                 vb, load_gpr(ctx, rc));
1803             tcg_temp_free(tmp);
1804             break;
1805         case 0x20:
1806             /* BIS */
1807             tcg_gen_or_i64(vc, va, vb);
1808             break;
1809         case 0x24:
1810             /* CMOVEQ */
1811             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1812                                 vb, load_gpr(ctx, rc));
1813             break;
1814         case 0x26:
1815             /* CMOVNE */
1816             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1817                                 vb, load_gpr(ctx, rc));
1818             break;
1819         case 0x28:
1820             /* ORNOT */
1821             tcg_gen_orc_i64(vc, va, vb);
1822             break;
1823         case 0x40:
1824             /* XOR */
1825             tcg_gen_xor_i64(vc, va, vb);
1826             break;
1827         case 0x44:
1828             /* CMOVLT */
1829             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1830                                 vb, load_gpr(ctx, rc));
1831             break;
1832         case 0x46:
1833             /* CMOVGE */
1834             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1835                                 vb, load_gpr(ctx, rc));
1836             break;
1837         case 0x48:
1838             /* EQV */
1839             tcg_gen_eqv_i64(vc, va, vb);
1840             break;
1841         case 0x61:
1842             /* AMASK */
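            /* AMASK clears the bits of the requested feature mask that
               this CPU implements, leaving only the unimplemented
               features set.  */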
1843             REQUIRE_REG_31(ra);
1844             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1845             break;
1846         case 0x64:
1847             /* CMOVLE */
1848             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1849                                 vb, load_gpr(ctx, rc));
1850             break;
1851         case 0x66:
1852             /* CMOVGT */
1853             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1854                                 vb, load_gpr(ctx, rc));
1855             break;
1856         case 0x6C:
1857             /* IMPLVER */
1858             REQUIRE_REG_31(ra);
1859             tcg_gen_movi_i64(vc, ctx->implver);
1860             break;
1861         default:
1862             goto invalid_opc;
1863         }
1864         break;
1865 
1866     case 0x12:
1867         vc = dest_gpr(ctx, rc);
1868         va = load_gpr(ctx, ra);
1869         switch (fn7) {
1870         case 0x02:
1871             /* MSKBL */
1872             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1873             break;
1874         case 0x06:
1875             /* EXTBL */
1876             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1877             break;
1878         case 0x0B:
1879             /* INSBL */
1880             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1881             break;
1882         case 0x12:
1883             /* MSKWL */
1884             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1885             break;
1886         case 0x16:
1887             /* EXTWL */
1888             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1889             break;
1890         case 0x1B:
1891             /* INSWL */
1892             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1893             break;
1894         case 0x22:
1895             /* MSKLL */
1896             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1897             break;
1898         case 0x26:
1899             /* EXTLL */
1900             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1901             break;
1902         case 0x2B:
1903             /* INSLL */
1904             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1905             break;
1906         case 0x30:
1907             /* ZAP */
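            /* ZAP clears the bytes selected by the low 8 mask bits, so
               with a literal it reduces to ZAPNOT of the complemented
               mask.  */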
1908             if (islit) {
1909                 gen_zapnoti(vc, va, ~lit);
1910             } else {
1911                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1912             }
1913             break;
1914         case 0x31:
1915             /* ZAPNOT */
1916             if (islit) {
1917                 gen_zapnoti(vc, va, lit);
1918             } else {
1919                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1920             }
1921             break;
1922         case 0x32:
1923             /* MSKQL */
1924             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1925             break;
1926         case 0x34:
1927             /* SRL */
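            /* Shift counts use only the low 6 bits of rb, hence the
               explicit & 0x3f in the register form (likewise for SLL
               and SRA below).  */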
1928             if (islit) {
1929                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1930             } else {
1931                 tmp = tcg_temp_new();
1932                 vb = load_gpr(ctx, rb);
1933                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1934                 tcg_gen_shr_i64(vc, va, tmp);
1935                 tcg_temp_free(tmp);
1936             }
1937             break;
1938         case 0x36:
1939             /* EXTQL */
1940             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1941             break;
1942         case 0x39:
1943             /* SLL */
1944             if (islit) {
1945                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1946             } else {
1947                 tmp = tcg_temp_new();
1948                 vb = load_gpr(ctx, rb);
1949                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1950                 tcg_gen_shl_i64(vc, va, tmp);
1951                 tcg_temp_free(tmp);
1952             }
1953             break;
1954         case 0x3B:
1955             /* INSQL */
1956             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1957             break;
1958         case 0x3C:
1959             /* SRA */
1960             if (islit) {
1961                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1962             } else {
1963                 tmp = tcg_temp_new();
1964                 vb = load_gpr(ctx, rb);
1965                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1966                 tcg_gen_sar_i64(vc, va, tmp);
1967                 tcg_temp_free(tmp);
1968             }
1969             break;
1970         case 0x52:
1971             /* MSKWH */
1972             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1973             break;
1974         case 0x57:
1975             /* INSWH */
1976             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1977             break;
1978         case 0x5A:
1979             /* EXTWH */
1980             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1981             break;
1982         case 0x62:
1983             /* MSKLH */
1984             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1985             break;
1986         case 0x67:
1987             /* INSLH */
1988             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1989             break;
1990         case 0x6A:
1991             /* EXTLH */
1992             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1993             break;
1994         case 0x72:
1995             /* MSKQH */
1996             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1997             break;
1998         case 0x77:
1999             /* INSQH */
2000             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2001             break;
2002         case 0x7A:
2003             /* EXTQH */
2004             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2005             break;
2006         default:
2007             goto invalid_opc;
2008         }
2009         break;
2010 
2011     case 0x13:
2012         vc = dest_gpr(ctx, rc);
2013         vb = load_gpr_lit(ctx, rb, lit, islit);
2014         va = load_gpr(ctx, ra);
2015         switch (fn7) {
2016         case 0x00:
2017             /* MULL */
2018             tcg_gen_mul_i64(vc, va, vb);
2019             tcg_gen_ext32s_i64(vc, vc);
2020             break;
2021         case 0x20:
2022             /* MULQ */
2023             tcg_gen_mul_i64(vc, va, vb);
2024             break;
2025         case 0x30:
2026             /* UMULH */
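            /* mulu2 produces the full 128-bit product; the low half
               lands in a scratch temp and is discarded, leaving the
               high 64 bits in vc.  */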
2027             tmp = tcg_temp_new();
2028             tcg_gen_mulu2_i64(tmp, vc, va, vb);
2029             tcg_temp_free(tmp);
2030             break;
2031         case 0x40:
2032             /* MULL/V */
2033             tmp = tcg_temp_new();
2034             tcg_gen_ext32s_i64(tmp, va);
2035             tcg_gen_ext32s_i64(vc, vb);
2036             tcg_gen_mul_i64(tmp, tmp, vc);
2037             tcg_gen_ext32s_i64(vc, tmp);
2038             gen_helper_check_overflow(cpu_env, vc, tmp);
2039             tcg_temp_free(tmp);
2040             break;
2041         case 0x60:
2042             /* MULQ/V */
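            /* muls2 yields the 128-bit signed product; it fits in
               64 bits iff the high half equals the sign extension of
               the low half, which is what the helper verifies.  */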
2043             tmp = tcg_temp_new();
2044             tmp2 = tcg_temp_new();
2045             tcg_gen_muls2_i64(vc, tmp, va, vb);
2046             tcg_gen_sari_i64(tmp2, vc, 63);
2047             gen_helper_check_overflow(cpu_env, tmp, tmp2);
2048             tcg_temp_free(tmp);
2049             tcg_temp_free(tmp2);
2050             break;
2051         default:
2052             goto invalid_opc;
2053         }
2054         break;
2055 
2056     case 0x14:
2057         REQUIRE_AMASK(FIX);
2058         vc = dest_fpr(ctx, rc);
2059         switch (fpfn) { /* fn11 & 0x3F */
2060         case 0x04:
2061             /* ITOFS */
2062             REQUIRE_REG_31(rb);
2063             t32 = tcg_temp_new_i32();
2064             va = load_gpr(ctx, ra);
2065             tcg_gen_extrl_i64_i32(t32, va);
2066             gen_helper_memory_to_s(vc, t32);
2067             tcg_temp_free_i32(t32);
2068             break;
2069         case 0x0A:
2070             /* SQRTF */
2071             REQUIRE_REG_31(ra);
2072             vb = load_fpr(ctx, rb);
2073             gen_helper_sqrtf(vc, cpu_env, vb);
2074             break;
2075         case 0x0B:
2076             /* SQRTS */
2077             REQUIRE_REG_31(ra);
2078             gen_sqrts(ctx, rb, rc, fn11);
2079             break;
2080         case 0x14:
2081             /* ITOFF */
2082             REQUIRE_REG_31(rb);
2083             t32 = tcg_temp_new_i32();
2084             va = load_gpr(ctx, ra);
2085             tcg_gen_extrl_i64_i32(t32, va);
2086             gen_helper_memory_to_f(vc, t32);
2087             tcg_temp_free_i32(t32);
2088             break;
2089         case 0x24:
2090             /* ITOFT */
2091             REQUIRE_REG_31(rb);
2092             va = load_gpr(ctx, ra);
2093             tcg_gen_mov_i64(vc, va);
2094             break;
2095         case 0x2A:
2096             /* SQRTG */
2097             REQUIRE_REG_31(ra);
2098             vb = load_fpr(ctx, rb);
2099             gen_helper_sqrtg(vc, cpu_env, vb);
2100             break;
2101         case 0x2B:
2102             /* SQRTT */
2103             REQUIRE_REG_31(ra);
2104             gen_sqrtt(ctx, rb, rc, fn11);
2105             break;
2106         default:
2107             goto invalid_opc;
2108         }
2109         break;
2110 
2111     case 0x15:
2112         /* VAX floating point */
2113         /* XXX: rounding mode and trap are ignored (!) */
2114         vc = dest_fpr(ctx, rc);
2115         vb = load_fpr(ctx, rb);
2116         va = load_fpr(ctx, ra);
2117         switch (fpfn) { /* fn11 & 0x3F */
2118         case 0x00:
2119             /* ADDF */
2120             gen_helper_addf(vc, cpu_env, va, vb);
2121             break;
2122         case 0x01:
2123             /* SUBF */
2124             gen_helper_subf(vc, cpu_env, va, vb);
2125             break;
2126         case 0x02:
2127             /* MULF */
2128             gen_helper_mulf(vc, cpu_env, va, vb);
2129             break;
2130         case 0x03:
2131             /* DIVF */
2132             gen_helper_divf(vc, cpu_env, va, vb);
2133             break;
2134         case 0x1E:
2135             /* CVTDG -- TODO */
2136             REQUIRE_REG_31(ra);
2137             goto invalid_opc;
2138         case 0x20:
2139             /* ADDG */
2140             gen_helper_addg(vc, cpu_env, va, vb);
2141             break;
2142         case 0x21:
2143             /* SUBG */
2144             gen_helper_subg(vc, cpu_env, va, vb);
2145             break;
2146         case 0x22:
2147             /* MULG */
2148             gen_helper_mulg(vc, cpu_env, va, vb);
2149             break;
2150         case 0x23:
2151             /* DIVG */
2152             gen_helper_divg(vc, cpu_env, va, vb);
2153             break;
2154         case 0x25:
2155             /* CMPGEQ */
2156             gen_helper_cmpgeq(vc, cpu_env, va, vb);
2157             break;
2158         case 0x26:
2159             /* CMPGLT */
2160             gen_helper_cmpglt(vc, cpu_env, va, vb);
2161             break;
2162         case 0x27:
2163             /* CMPGLE */
2164             gen_helper_cmpgle(vc, cpu_env, va, vb);
2165             break;
2166         case 0x2C:
2167             /* CVTGF */
2168             REQUIRE_REG_31(ra);
2169             gen_helper_cvtgf(vc, cpu_env, vb);
2170             break;
2171         case 0x2D:
2172             /* CVTGD -- TODO */
2173             REQUIRE_REG_31(ra);
2174             goto invalid_opc;
2175         case 0x2F:
2176             /* CVTGQ */
2177             REQUIRE_REG_31(ra);
2178             gen_helper_cvtgq(vc, cpu_env, vb);
2179             break;
2180         case 0x3C:
2181             /* CVTQF */
2182             REQUIRE_REG_31(ra);
2183             gen_helper_cvtqf(vc, cpu_env, vb);
2184             break;
2185         case 0x3E:
2186             /* CVTQG */
2187             REQUIRE_REG_31(ra);
2188             gen_helper_cvtqg(vc, cpu_env, vb);
2189             break;
2190         default:
2191             goto invalid_opc;
2192         }
2193         break;
2194 
2195     case 0x16:
2196         /* IEEE floating-point */
2197         switch (fpfn) { /* fn11 & 0x3F */
2198         case 0x00:
2199             /* ADDS */
2200             gen_adds(ctx, ra, rb, rc, fn11);
2201             break;
2202         case 0x01:
2203             /* SUBS */
2204             gen_subs(ctx, ra, rb, rc, fn11);
2205             break;
2206         case 0x02:
2207             /* MULS */
2208             gen_muls(ctx, ra, rb, rc, fn11);
2209             break;
2210         case 0x03:
2211             /* DIVS */
2212             gen_divs(ctx, ra, rb, rc, fn11);
2213             break;
2214         case 0x20:
2215             /* ADDT */
2216             gen_addt(ctx, ra, rb, rc, fn11);
2217             break;
2218         case 0x21:
2219             /* SUBT */
2220             gen_subt(ctx, ra, rb, rc, fn11);
2221             break;
2222         case 0x22:
2223             /* MULT */
2224             gen_mult(ctx, ra, rb, rc, fn11);
2225             break;
2226         case 0x23:
2227             /* DIVT */
2228             gen_divt(ctx, ra, rb, rc, fn11);
2229             break;
2230         case 0x24:
2231             /* CMPTUN */
2232             gen_cmptun(ctx, ra, rb, rc, fn11);
2233             break;
2234         case 0x25:
2235             /* CMPTEQ */
2236             gen_cmpteq(ctx, ra, rb, rc, fn11);
2237             break;
2238         case 0x26:
2239             /* CMPTLT */
2240             gen_cmptlt(ctx, ra, rb, rc, fn11);
2241             break;
2242         case 0x27:
2243             /* CMPTLE */
2244             gen_cmptle(ctx, ra, rb, rc, fn11);
2245             break;
2246         case 0x2C:
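            /* CVTST and CVTTS share fpfn 0x2C; only the full fn11
               value (0x2AC or 0x6AC) identifies CVTST.  */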
2247             REQUIRE_REG_31(ra);
2248             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2249                 /* CVTST */
2250                 gen_cvtst(ctx, rb, rc, fn11);
2251             } else {
2252                 /* CVTTS */
2253                 gen_cvtts(ctx, rb, rc, fn11);
2254             }
2255             break;
2256         case 0x2F:
2257             /* CVTTQ */
2258             REQUIRE_REG_31(ra);
2259             gen_cvttq(ctx, rb, rc, fn11);
2260             break;
2261         case 0x3C:
2262             /* CVTQS */
2263             REQUIRE_REG_31(ra);
2264             gen_cvtqs(ctx, rb, rc, fn11);
2265             break;
2266         case 0x3E:
2267             /* CVTQT */
2268             REQUIRE_REG_31(ra);
2269             gen_cvtqt(ctx, rb, rc, fn11);
2270             break;
2271         default:
2272             goto invalid_opc;
2273         }
2274         break;
2275 
2276     case 0x17:
2277         switch (fn11) {
2278         case 0x010:
2279             /* CVTLQ */
2280             REQUIRE_REG_31(ra);
2281             vc = dest_fpr(ctx, rc);
2282             vb = load_fpr(ctx, rb);
2283             gen_cvtlq(vc, vb);
2284             break;
2285         case 0x020:
2286             /* CPYS */
2287             if (rc == 31) {
2288                 /* Special case CPYS as FNOP.  */
2289             } else {
2290                 vc = dest_fpr(ctx, rc);
2291                 va = load_fpr(ctx, ra);
2292                 if (ra == rb) {
2293                     /* Special case CPYS as FMOV.  */
2294                     tcg_gen_mov_i64(vc, va);
2295                 } else {
2296                     vb = load_fpr(ctx, rb);
2297                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2298                 }
2299             }
2300             break;
2301         case 0x021:
2302             /* CPYSN */
2303             vc = dest_fpr(ctx, rc);
2304             vb = load_fpr(ctx, rb);
2305             va = load_fpr(ctx, ra);
2306             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2307             break;
2308         case 0x022:
2309             /* CPYSE */
2310             vc = dest_fpr(ctx, rc);
2311             vb = load_fpr(ctx, rb);
2312             va = load_fpr(ctx, ra);
2313             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2314             break;
2315         case 0x024:
2316             /* MT_FPCR */
2317             va = load_fpr(ctx, ra);
2318             gen_helper_store_fpcr(cpu_env, va);
2319             if (ctx->tb_rm == QUAL_RM_D) {
2320                 /* Re-do the copy of the rounding mode to fp_status
2321                    the next time we use dynamic rounding.  */
2322                 ctx->tb_rm = -1;
2323             }
2324             break;
2325         case 0x025:
2326             /* MF_FPCR */
2327             va = dest_fpr(ctx, ra);
2328             gen_helper_load_fpcr(va, cpu_env);
2329             break;
2330         case 0x02A:
2331             /* FCMOVEQ */
2332             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2333             break;
2334         case 0x02B:
2335             /* FCMOVNE */
2336             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2337             break;
2338         case 0x02C:
2339             /* FCMOVLT */
2340             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2341             break;
2342         case 0x02D:
2343             /* FCMOVGE */
2344             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2345             break;
2346         case 0x02E:
2347             /* FCMOVLE */
2348             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2349             break;
2350         case 0x02F:
2351             /* FCMOVGT */
2352             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2353             break;
2354         case 0x030: /* CVTQL */
2355         case 0x130: /* CVTQL/V */
2356         case 0x530: /* CVTQL/SV */
2357             REQUIRE_REG_31(ra);
2358             vc = dest_fpr(ctx, rc);
2359             vb = load_fpr(ctx, rb);
2360             gen_helper_cvtql(vc, cpu_env, vb);
2361             gen_fp_exc_raise(rc, fn11);
2362             break;
2363         default:
2364             goto invalid_opc;
2365         }
2366         break;
2367 
2368     case 0x18:
2369         switch ((uint16_t)disp16) {
2370         case 0x0000:
2371             /* TRAPB */
2372             /* No-op.  */
2373             break;
2374         case 0x0400:
2375             /* EXCB */
2376             /* No-op.  */
2377             break;
2378         case 0x4000:
2379             /* MB */
2380             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2381             break;
2382         case 0x4400:
2383             /* WMB */
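            /* WMB orders stores against stores only, hence the weaker
               store-store barrier.  */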
2384             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2385             break;
2386         case 0x8000:
2387             /* FETCH */
2388             /* No-op */
2389             break;
2390         case 0xA000:
2391             /* FETCH_M */
2392             /* No-op */
2393             break;
2394         case 0xC000:
2395             /* RPCC */
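            /* When icount is active, reading the cycle counter counts
               as I/O: bracket the helper with gen_io_start and end the
               TB so the instruction budget is re-evaluated.  */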
2396             va = dest_gpr(ctx, ra);
2397             if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2398                 gen_io_start();
2399                 gen_helper_load_pcc(va, cpu_env);
2400                 ret = DISAS_PC_STALE;
2401             } else {
2402                 gen_helper_load_pcc(va, cpu_env);
2403             }
2404             break;
2405         case 0xE000:
2406             /* RC */
2407             gen_rx(ctx, ra, 0);
2408             break;
2409         case 0xE800:
2410             /* ECB */
2411             break;
2412         case 0xF000:
2413             /* RS */
2414             gen_rx(ctx, ra, 1);
2415             break;
2416         case 0xF800:
2417             /* WH64 */
2418             /* No-op */
2419             break;
2420         case 0xFC00:
2421             /* WH64EN */
2422             /* No-op */
2423             break;
2424         default:
2425             goto invalid_opc;
2426         }
2427         break;
2428 
2429     case 0x19:
2430         /* HW_MFPR (PALcode) */
2431 #ifndef CONFIG_USER_ONLY
2432         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2433         va = dest_gpr(ctx, ra);
2434         ret = gen_mfpr(ctx, va, insn & 0xffff);
2435         break;
2436 #else
2437         goto invalid_opc;
2438 #endif
2439 
2440     case 0x1A:
2441         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2442            prediction stack action, which of course we don't implement.  */
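        /* The target must be longword aligned, so force the low two
           bits of the new PC clear; ra (unless it is $31) receives the
           return address.  */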
2443         vb = load_gpr(ctx, rb);
2444         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2445         if (ra != 31) {
2446             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2447         }
2448         ret = DISAS_PC_UPDATED;
2449         break;
2450 
2451     case 0x1B:
2452         /* HW_LD (PALcode) */
2453 #ifndef CONFIG_USER_ONLY
2454         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2455         {
2456             TCGv addr = tcg_temp_new();
2457             vb = load_gpr(ctx, rb);
2458             va = dest_gpr(ctx, ra);
2459 
2460             tcg_gen_addi_i64(addr, vb, disp12);
2461             switch ((insn >> 12) & 0xF) {
2462             case 0x0:
2463                 /* Longword physical access (hw_ldl/p) */
2464                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2465                 break;
2466             case 0x1:
2467                 /* Quadword physical access (hw_ldq/p) */
2468                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2469                 break;
2470             case 0x2:
2471                 /* Longword physical access with lock (hw_ldl_l/p) */
2472                 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2473                 break;
2474             case 0x3:
2475                 /* Quadword physical access with lock (hw_ldq_l/p) */
2476                 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2477                 break;
2478             case 0x4:
2479                 /* Longword virtual PTE fetch (hw_ldl/v) */
2480                 goto invalid_opc;
2481             case 0x5:
2482                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2483                 goto invalid_opc;
2485             case 0x6:
2486                 /* Invalid */
2487                 goto invalid_opc;
2488             case 0x7:
2489                 /* Invalid */
2490                 goto invalid_opc;
2491             case 0x8:
2492                 /* Longword virtual access (hw_ldl) */
2493                 goto invalid_opc;
2494             case 0x9:
2495                 /* Quadword virtual access (hw_ldq) */
2496                 goto invalid_opc;
2497             case 0xA:
2498                 /* Longword virtual access with protection check (hw_ldl/w) */
2499                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2500                 break;
2501             case 0xB:
2502                 /* Quadword virtual access with protection check (hw_ldq/w) */
2503                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2504                 break;
2505             case 0xC:
2506                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2507                 goto invalid_opc;
2508             case 0xD:
2509                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2510                 goto invalid_opc;
2511             case 0xE:
2512                 /* Longword virtual access with alternate access mode and
2513                    protection checks (hw_ldl/wa) */
2514                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2515                 break;
2516             case 0xF:
2517                 /* Quadword virtual access with alternate access mode and
2518                    protection checks (hw_ldq/wa) */
2519                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2520                 break;
2521             }
2522             tcg_temp_free(addr);
2523             break;
2524         }
2525 #else
2526         goto invalid_opc;
2527 #endif
2528 
2529     case 0x1C:
2530         vc = dest_gpr(ctx, rc);
2531         if (fn7 == 0x70) {
2532             /* FTOIT */
2533             REQUIRE_AMASK(FIX);
2534             REQUIRE_REG_31(rb);
2535             va = load_fpr(ctx, ra);
2536             tcg_gen_mov_i64(vc, va);
2537             break;
2538         } else if (fn7 == 0x78) {
2539             /* FTOIS */
2540             REQUIRE_AMASK(FIX);
2541             REQUIRE_REG_31(rb);
2542             t32 = tcg_temp_new_i32();
2543             va = load_fpr(ctx, ra);
2544             gen_helper_s_to_memory(t32, va);
2545             tcg_gen_ext_i32_i64(vc, t32);
2546             tcg_temp_free_i32(t32);
2547             break;
2548         }
2549 
2550         vb = load_gpr_lit(ctx, rb, lit, islit);
2551         switch (fn7) {
2552         case 0x00:
2553             /* SEXTB */
2554             REQUIRE_AMASK(BWX);
2555             REQUIRE_REG_31(ra);
2556             tcg_gen_ext8s_i64(vc, vb);
2557             break;
2558         case 0x01:
2559             /* SEXTW */
2560             REQUIRE_AMASK(BWX);
2561             REQUIRE_REG_31(ra);
2562             tcg_gen_ext16s_i64(vc, vb);
2563             break;
2564         case 0x30:
2565             /* CTPOP */
2566             REQUIRE_AMASK(CIX);
2567             REQUIRE_REG_31(ra);
2568             REQUIRE_NO_LIT;
2569             tcg_gen_ctpop_i64(vc, vb);
2570             break;
2571         case 0x31:
2572             /* PERR */
2573             REQUIRE_AMASK(MVI);
2574             REQUIRE_NO_LIT;
2575             va = load_gpr(ctx, ra);
2576             gen_helper_perr(vc, va, vb);
2577             break;
2578         case 0x32:
2579             /* CTLZ */
2580             REQUIRE_AMASK(CIX);
2581             REQUIRE_REG_31(ra);
2582             REQUIRE_NO_LIT;
2583             tcg_gen_clzi_i64(vc, vb, 64);
2584             break;
2585         case 0x33:
2586             /* CTTZ */
2587             REQUIRE_AMASK(CIX);
2588             REQUIRE_REG_31(ra);
2589             REQUIRE_NO_LIT;
2590             tcg_gen_ctzi_i64(vc, vb, 64);
2591             break;
2592         case 0x34:
2593             /* UNPKBW */
2594             REQUIRE_AMASK(MVI);
2595             REQUIRE_REG_31(ra);
2596             REQUIRE_NO_LIT;
2597             gen_helper_unpkbw(vc, vb);
2598             break;
2599         case 0x35:
2600             /* UNPKBL */
2601             REQUIRE_AMASK(MVI);
2602             REQUIRE_REG_31(ra);
2603             REQUIRE_NO_LIT;
2604             gen_helper_unpkbl(vc, vb);
2605             break;
2606         case 0x36:
2607             /* PKWB */
2608             REQUIRE_AMASK(MVI);
2609             REQUIRE_REG_31(ra);
2610             REQUIRE_NO_LIT;
2611             gen_helper_pkwb(vc, vb);
2612             break;
2613         case 0x37:
2614             /* PKLB */
2615             REQUIRE_AMASK(MVI);
2616             REQUIRE_REG_31(ra);
2617             REQUIRE_NO_LIT;
2618             gen_helper_pklb(vc, vb);
2619             break;
2620         case 0x38:
2621             /* MINSB8 */
2622             REQUIRE_AMASK(MVI);
2623             va = load_gpr(ctx, ra);
2624             gen_helper_minsb8(vc, va, vb);
2625             break;
2626         case 0x39:
2627             /* MINSW4 */
2628             REQUIRE_AMASK(MVI);
2629             va = load_gpr(ctx, ra);
2630             gen_helper_minsw4(vc, va, vb);
2631             break;
2632         case 0x3A:
2633             /* MINUB8 */
2634             REQUIRE_AMASK(MVI);
2635             va = load_gpr(ctx, ra);
2636             gen_helper_minub8(vc, va, vb);
2637             break;
2638         case 0x3B:
2639             /* MINUW4 */
2640             REQUIRE_AMASK(MVI);
2641             va = load_gpr(ctx, ra);
2642             gen_helper_minuw4(vc, va, vb);
2643             break;
2644         case 0x3C:
2645             /* MAXUB8 */
2646             REQUIRE_AMASK(MVI);
2647             va = load_gpr(ctx, ra);
2648             gen_helper_maxub8(vc, va, vb);
2649             break;
2650         case 0x3D:
2651             /* MAXUW4 */
2652             REQUIRE_AMASK(MVI);
2653             va = load_gpr(ctx, ra);
2654             gen_helper_maxuw4(vc, va, vb);
2655             break;
2656         case 0x3E:
2657             /* MAXSB8 */
2658             REQUIRE_AMASK(MVI);
2659             va = load_gpr(ctx, ra);
2660             gen_helper_maxsb8(vc, va, vb);
2661             break;
2662         case 0x3F:
2663             /* MAXSW4 */
2664             REQUIRE_AMASK(MVI);
2665             va = load_gpr(ctx, ra);
2666             gen_helper_maxsw4(vc, va, vb);
2667             break;
2668         default:
2669             goto invalid_opc;
2670         }
2671         break;
2672 
2673     case 0x1D:
2674         /* HW_MTPR (PALcode) */
2675 #ifndef CONFIG_USER_ONLY
2676         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2677         vb = load_gpr(ctx, rb);
2678         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2679         break;
2680 #else
2681         goto invalid_opc;
2682 #endif
2683 
2684     case 0x1E:
2685         /* HW_RET (PALcode) */
2686 #ifndef CONFIG_USER_ONLY
2687         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2688         if (rb == 31) {
2689             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2690                address from EXC_ADDR.  This turns out to be useful for our
2691                emulation PALcode, so continue to accept it.  */
2692             ctx->lit = vb = tcg_temp_new();
2693             tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2694         } else {
2695             vb = load_gpr(ctx, rb);
2696         }
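        /* Leaving PALcode: invalidate any outstanding lock, clear the
           RX flag, and take the new PAL-mode flag from bit 0 of the
           target address, which is itself longword aligned.  */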
2697         tcg_gen_movi_i64(cpu_lock_addr, -1);
2698         tmp = tcg_temp_new();
2699         tcg_gen_movi_i64(tmp, 0);
2700         st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2701         tcg_gen_andi_i64(tmp, vb, 1);
2702         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2703         tcg_temp_free(tmp);
2704         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2705         /* Allow interrupts to be recognized right away.  */
2706         ret = DISAS_PC_UPDATED_NOCHAIN;
2707         break;
2708 #else
2709         goto invalid_opc;
2710 #endif
2711 
2712     case 0x1F:
2713         /* HW_ST (PALcode) */
2714 #ifndef CONFIG_USER_ONLY
2715         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2716         {
2717             switch ((insn >> 12) & 0xF) {
2718             case 0x0:
2719                 /* Longword physical access */
2720                 va = load_gpr(ctx, ra);
2721                 vb = load_gpr(ctx, rb);
2722                 tmp = tcg_temp_new();
2723                 tcg_gen_addi_i64(tmp, vb, disp12);
2724                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2725                 tcg_temp_free(tmp);
2726                 break;
2727             case 0x1:
2728                 /* Quadword physical access */
2729                 va = load_gpr(ctx, ra);
2730                 vb = load_gpr(ctx, rb);
2731                 tmp = tcg_temp_new();
2732                 tcg_gen_addi_i64(tmp, vb, disp12);
2733                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2734                 tcg_temp_free(tmp);
2735                 break;
2736             case 0x2:
2737                 /* Longword physical access with lock */
2738                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2739                                             MMU_PHYS_IDX, MO_LESL);
2740                 break;
2741             case 0x3:
2742                 /* Quadword physical access with lock */
2743                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2744                                             MMU_PHYS_IDX, MO_LEQ);
2745                 break;
2746             case 0x4:
2747                 /* Longword virtual access */
2748                 goto invalid_opc;
2749             case 0x5:
2750                 /* Quadword virtual access */
2751                 goto invalid_opc;
2752             case 0x6:
2753                 /* Invalid */
2754                 goto invalid_opc;
2755             case 0x7:
2756                 /* Invalid */
2757                 goto invalid_opc;
2758             case 0x8:
2759                 /* Invalid */
2760                 goto invalid_opc;
2761             case 0x9:
2762                 /* Invalid */
2763                 goto invalid_opc;
2764             case 0xA:
2765                 /* Invalid */
2766                 goto invalid_opc;
2767             case 0xB:
2768                 /* Invalid */
2769                 goto invalid_opc;
2770             case 0xC:
2771                 /* Longword virtual access with alternate access mode */
2772                 goto invalid_opc;
2773             case 0xD:
2774                 /* Quadword virtual access with alternate access mode */
2775                 goto invalid_opc;
2776             case 0xE:
2777                 /* Invalid */
2778                 goto invalid_opc;
2779             case 0xF:
2780                 /* Invalid */
2781                 goto invalid_opc;
2782             }
2783             break;
2784         }
2785 #else
2786         goto invalid_opc;
2787 #endif
2788     case 0x20:
2789         /* LDF */
2790         gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2791         break;
2792     case 0x21:
2793         /* LDG */
2794         gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2795         break;
2796     case 0x22:
2797         /* LDS */
2798         gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2799         break;
2800     case 0x23:
2801         /* LDT */
2802         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2803         break;
2804     case 0x24:
2805         /* STF */
2806         gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2807         break;
2808     case 0x25:
2809         /* STG */
2810         gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2811         break;
2812     case 0x26:
2813         /* STS */
2814         gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2815         break;
2816     case 0x27:
2817         /* STT */
2818         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2819         break;
2820     case 0x28:
2821         /* LDL */
2822         gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2823         break;
2824     case 0x29:
2825         /* LDQ */
2826         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2827         break;
2828     case 0x2A:
2829         /* LDL_L */
2830         gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2831         break;
2832     case 0x2B:
2833         /* LDQ_L */
2834         gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2835         break;
2836     case 0x2C:
2837         /* STL */
2838         gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2839         break;
2840     case 0x2D:
2841         /* STQ */
2842         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2843         break;
2844     case 0x2E:
2845         /* STL_C */
2846         ret = gen_store_conditional(ctx, ra, rb, disp16,
2847                                     ctx->mem_idx, MO_LESL);
2848         break;
2849     case 0x2F:
2850         /* STQ_C */
2851         ret = gen_store_conditional(ctx, ra, rb, disp16,
2852                                     ctx->mem_idx, MO_LEQ);
2853         break;
2854     case 0x30:
2855         /* BR */
2856         ret = gen_bdirect(ctx, ra, disp21);
2857         break;
2858     case 0x31: /* FBEQ */
2859         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2860         break;
2861     case 0x32: /* FBLT */
2862         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2863         break;
2864     case 0x33: /* FBLE */
2865         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2866         break;
2867     case 0x34:
2868         /* BSR */
2869         ret = gen_bdirect(ctx, ra, disp21);
2870         break;
2871     case 0x35: /* FBNE */
2872         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2873         break;
2874     case 0x36: /* FBGE */
2875         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2876         break;
2877     case 0x37: /* FBGT */
2878         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2879         break;
2880     case 0x38:
2881         /* BLBC */
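        /* The final argument requests a low-bit test, so BLBC branches
           when bit 0 of ra is clear (and BLBS below when it is set).  */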
2882         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2883         break;
2884     case 0x39:
2885         /* BEQ */
2886         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2887         break;
2888     case 0x3A:
2889         /* BLT */
2890         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2891         break;
2892     case 0x3B:
2893         /* BLE */
2894         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2895         break;
2896     case 0x3C:
2897         /* BLBS */
2898         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2899         break;
2900     case 0x3D:
2901         /* BNE */
2902         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2903         break;
2904     case 0x3E:
2905         /* BGE */
2906         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2907         break;
2908     case 0x3F:
2909         /* BGT */
2910         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2911         break;
2912     invalid_opc:
2913         ret = gen_invalid(ctx);
2914         break;
2915     }
2916 
2917     return ret;
2918 }
2919 
2920 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2921 {
2922     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2923     CPUAlphaState *env = cpu->env_ptr;
2924     int64_t bound, mask;
2925 
2926     ctx->tbflags = ctx->base.tb->flags;
2927     ctx->mem_idx = cpu_mmu_index(env, false);
2928     ctx->implver = env->implver;
2929     ctx->amask = env->amask;
2930 
2931 #ifdef CONFIG_USER_ONLY
2932     ctx->ir = cpu_std_ir;
2933 #else
2934     ctx->palbr = env->palbr;
2935     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2936 #endif
2937 
2938     /* ??? Every TB begins with unset rounding mode, to be initialized on
2939        the first fp insn of the TB.  Alternately we could define a proper
2940        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2941        to reset the FP_STATUS to that default at the end of any TB that
2942        changes the default.  We could even (gasp) dynamically figure out
2943        what default would be most efficient given the running program.  */
2944     ctx->tb_rm = -1;
2945     /* Similarly for flush-to-zero.  */
2946     ctx->tb_ftz = -1;
2947 
2948     ctx->zero = NULL;
2949     ctx->sink = NULL;
2950     ctx->lit = NULL;
2951 
2952     /* Bound the number of insns to execute to those left on the page.  */
2953     if (in_superpage(ctx, ctx->base.pc_first)) {
2954         mask = -1ULL << 41;
2955     } else {
2956         mask = TARGET_PAGE_MASK;
2957     }
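    /* -(pc | mask) is the number of bytes from pc to the end of the
       page (or of the 2**41 superpage); dividing by 4 converts that
       to instruction slots.  */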
2958     bound = -(ctx->base.pc_first | mask) / 4;
2959     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2960 }
2961 
2962 static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
2963 {
2964 }
2965 
2966 static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
2967 {
2968     tcg_gen_insn_start(dcbase->pc_next);
2969 }
2970 
2971 static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
2972                                       const CPUBreakpoint *bp)
2973 {
2974     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2975 
2976     ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);
2977 
2978     /* The address covered by the breakpoint must be included in
2979        [tb->pc, tb->pc + tb->size) in order for it to be
2980        properly cleared -- thus we increment the PC here so that
2981        the logic setting tb->size below does the right thing.  */
2982     ctx->base.pc_next += 4;
2983     return true;
2984 }
2985 
2986 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2987 {
2988     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2989     CPUAlphaState *env = cpu->env_ptr;
2990     uint32_t insn = translator_ldl(env, ctx->base.pc_next);
2991 
2992     ctx->base.pc_next += 4;
2993     ctx->base.is_jmp = translate_one(ctx, insn);
2994 
2995     free_context_temps(ctx);
2996     translator_loop_temp_check(&ctx->base);
2997 }
2998 
2999 static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
3000 {
3001     DisasContext *ctx = container_of(dcbase, DisasContext, base);
3002 
3003     switch (ctx->base.is_jmp) {
3004     case DISAS_NORETURN:
3005         break;
3006     case DISAS_TOO_MANY:
3007         if (use_goto_tb(ctx, ctx->base.pc_next)) {
3008             tcg_gen_goto_tb(0);
3009             tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3010             tcg_gen_exit_tb(ctx->base.tb, 0);
3011         }
3012         /* FALLTHRU */
3013     case DISAS_PC_STALE:
3014         tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
3015         /* FALLTHRU */
3016     case DISAS_PC_UPDATED:
3017         if (!use_exit_tb(ctx)) {
3018             tcg_gen_lookup_and_goto_ptr();
3019             break;
3020         }
3021         /* FALLTHRU */
3022     case DISAS_PC_UPDATED_NOCHAIN:
3023         if (ctx->base.singlestep_enabled) {
3024             gen_excp_1(EXCP_DEBUG, 0);
3025         } else {
3026             tcg_gen_exit_tb(NULL, 0);
3027         }
3028         break;
3029     default:
3030         g_assert_not_reached();
3031     }
3032 }
3033 
3034 static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
3035 {
3036     qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
3037     log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
3038 }
3039 
3040 static const TranslatorOps alpha_tr_ops = {
3041     .init_disas_context = alpha_tr_init_disas_context,
3042     .tb_start           = alpha_tr_tb_start,
3043     .insn_start         = alpha_tr_insn_start,
3044     .breakpoint_check   = alpha_tr_breakpoint_check,
3045     .translate_insn     = alpha_tr_translate_insn,
3046     .tb_stop            = alpha_tr_tb_stop,
3047     .disas_log          = alpha_tr_disas_log,
3048 };
3049 
3050 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
3051 {
3052     DisasContext dc;
3053     translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
3054 }
3055 
3056 void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
3057                           target_ulong *data)
3058 {
3059     env->pc = data[0];
3060 }
3061