xref: /openbmc/qemu/target/alpha/translate.c (revision 438c78da)
1 /*
2  *  Alpha emulation cpu translation for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg-op.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/translator.h"
32 #include "exec/log.h"
33 
34 
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
37 
38 #ifdef ALPHA_DEBUG_DISAS
39 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 #else
41 #  define LOG_DISAS(...) do { } while (0)
42 #endif
43 
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    /* NOTE(review): presumably a cached copy of the PALcode base
       address (env->palbr); not referenced in this chunk — confirm.  */
    uint64_t palbr;
#endif
    /* Copy of the env flag bits that affect translation of this TB.  */
    uint32_t tbflags;
    /* MMU index used for the qemu_ld/st ops emitted for this context.  */
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;
};
72 
73 /* Target-specific return values from translate_one, indicating the
74    state of the TB.  Note that DISAS_NEXT indicates that we are not
75    exiting the TB.  */
76 #define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
77 #define DISAS_PC_UPDATED          DISAS_TARGET_1
78 #define DISAS_PC_STALE            DISAS_TARGET_2
79 
/* global register indexes */
static TCGv cpu_std_ir[31];   /* integer registers; $31 is never allocated */
static TCGv cpu_fir[31];      /* fp registers; $f31 is never allocated */
static TCGv cpu_pc;
static TCGv cpu_lock_addr;    /* LDx_L/STx_C lock address and value */
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
/* PALmode view of the register file: same as cpu_std_ir except that
   eight entries are redirected to the shadow registers.  */
static TCGv cpu_pal_ir[31];
#endif
90 
91 #include "exec/gen-icount.h"
92 
/* Allocate the TCG globals that mirror the CPUAlphaState fields:
   the integer and fp register files, pc, and the lock address/value
   used by load-locked/store-conditional.  Called once at init.  */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    /* The PALmode view shares most entries with the standard view;
       only the shadow registers ($8-$14 and $25) are replaced.  */
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
156 
157 static TCGv load_zero(DisasContext *ctx)
158 {
159     if (!ctx->zero) {
160         ctx->zero = tcg_const_i64(0);
161     }
162     return ctx->zero;
163 }
164 
165 static TCGv dest_sink(DisasContext *ctx)
166 {
167     if (!ctx->sink) {
168         ctx->sink = tcg_temp_new();
169     }
170     return ctx->sink;
171 }
172 
/* Release the lazily-created per-insn temporaries, if any exist.  */
static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        /* The sink's contents are never used; discard before freeing.  */
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
    if (ctx->zero) {
        tcg_temp_free(ctx->zero);
        ctx->zero = NULL;
    }
    if (ctx->lit) {
        tcg_temp_free(ctx->lit);
        ctx->lit = NULL;
    }
}
189 
190 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
191 {
192     if (likely(reg < 31)) {
193         return ctx->ir[reg];
194     } else {
195         return load_zero(ctx);
196     }
197 }
198 
199 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
200                          uint8_t lit, bool islit)
201 {
202     if (islit) {
203         ctx->lit = tcg_const_i64(lit);
204         return ctx->lit;
205     } else if (likely(reg < 31)) {
206         return ctx->ir[reg];
207     } else {
208         return load_zero(ctx);
209     }
210 }
211 
212 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
213 {
214     if (likely(reg < 31)) {
215         return ctx->ir[reg];
216     } else {
217         return dest_sink(ctx);
218     }
219 }
220 
221 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
222 {
223     if (likely(reg < 31)) {
224         return cpu_fir[reg];
225     } else {
226         return load_zero(ctx);
227     }
228 }
229 
230 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
231 {
232     if (likely(reg < 31)) {
233         return cpu_fir[reg];
234     } else {
235         return dest_sink(ctx);
236     }
237 }
238 
/* Return the byte offset within CPUAlphaState of the byte of
   env->flags that holds bit SHIFT, accounting for host endianness
   (flags is accessed bytewise by ld/st_flag_byte below).  */
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
249 
/* Load (zero-extended) the flag byte containing bit SHIFT into VAL.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}
254 
/* Store the low byte of VAL into the flag byte containing bit SHIFT.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
259 
260 static void gen_excp_1(int exception, int error_code)
261 {
262     TCGv_i32 tmp1, tmp2;
263 
264     tmp1 = tcg_const_i32(exception);
265     tmp2 = tcg_const_i32(error_code);
266     gen_helper_excp(cpu_env, tmp1, tmp2);
267     tcg_temp_free_i32(tmp2);
268     tcg_temp_free_i32(tmp1);
269 }
270 
/* Raise EXCEPTION at the current insn; cpu_pc is synced to pc_next
   first so the helper observes the correct PC.  Ends the TB.  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
277 
/* Raise an illegal-opcode (OPCDEC) exception for the current insn.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
282 
/* LDF: load 32 bits from T1 and expand from F-float memory format to
   register format via the memory_to_f helper.  */
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
290 
/* LDG: load 64 bits from T1 and convert from G-float memory format to
   register format via the memory_to_g helper.  */
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
298 
/* LDS: load 32 bits from T1 and expand from S-float memory format to
   register format via the memory_to_s helper.  */
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}
306 
/* LDL_L: load-locked longword.  Record the address and loaded value
   so a later store-conditional can detect intervening writes.  */
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
313 
/* LDQ_L: load-locked quadword; see gen_store_conditional for the
   matching store-conditional protocol.  */
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
320 
/* Common code for memory loads: compute the effective address
   RB + DISP16 (optionally cleared to an 8-byte boundary for LDx_U)
   and load into integer or fp register RA via TCG_GEN_QEMU_LOAD.  */
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    /* Both adjustments write into tmp and redirect addr to it, so the
       source register is never modified.  */
    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
353 
/* STF: convert T0 from register format to F-float memory format and
   store 32 bits at T1.  */
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
361 
/* STG: convert T0 from register format to G-float memory format and
   store 64 bits at T1.  */
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}
369 
/* STS: convert T0 from register format to S-float memory format and
   store 32 bits at T1.  */
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
377 
/* Common code for memory stores: compute the effective address
   RB + DISP16 (optionally cleared to an 8-byte boundary for STx_U)
   and store integer or fp register RA via TCG_GEN_QEMU_STORE.  */
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    /* Address arithmetic goes through tmp so RB is never clobbered.  */
    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
403 
/* STL_C/STQ_C: store-conditional.  Succeeds only if the effective
   address still matches cpu_lock_addr and memory still holds
   cpu_lock_value (checked with an atomic cmpxchg).  RA receives 1 on
   success, 0 on failure, and the lock is always invalidated.  */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           TCGMemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    /* Free per-insn temps now: the branches below would otherwise
       leave them live across basic-block boundaries.  */
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    /* Fail fast if the address no longer matches the locked one.  */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    /* Atomically store RA iff memory still contains lock_value; val
       receives the value found in memory either way.  */
    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        /* Success iff the cmpxchg saw the expected value.  */
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* Invalidate the lock regardless of outcome.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
440 
/* Return true if ADDR lies in the kernel superpage region: we must be
   in kernel mode, ADDR must sign-extend from the virtual address
   space width, and address bits <42:41> must equal 2.  Superpage
   mappings never change, so branches within it can be chained.  */
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
#ifndef CONFIG_USER_ONLY
    return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
            && ((addr >> 41) & 3) == 2);
#else
    return false;
#endif
}
451 
/* True when the TB must end with a full exit (no chaining): after an
   I/O insn, or under either form of single-stepping.  */
static bool use_exit_tb(DisasContext *ctx)
{
    return ((tb_cflags(ctx->base.tb) & CF_LAST_IO)
            || ctx->base.singlestep_enabled
            || singlestep);
}
458 
/* Decide whether a direct branch to DEST may use goto_tb chaining.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-steping and IO.  */
    if (unlikely(use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
#else
    /* User mode: page permissions are stable, always chain.  */
    return true;
#endif
}
476 
477 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
478 {
479     uint64_t dest = ctx->base.pc_next + (disp << 2);
480 
481     if (ra != 31) {
482         tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
483     }
484 
485     /* Notice branch-to-next; used to initialize RA with the PC.  */
486     if (disp == 0) {
487         return 0;
488     } else if (use_goto_tb(ctx, dest)) {
489         tcg_gen_goto_tb(0);
490         tcg_gen_movi_i64(cpu_pc, dest);
491         tcg_gen_exit_tb(ctx->base.tb, 0);
492         return DISAS_NORETURN;
493     } else {
494         tcg_gen_movi_i64(cpu_pc, dest);
495         return DISAS_PC_UPDATED;
496     }
497 }
498 
/* Emit a conditional branch on CMP <cond> 0 to PC + 4 + 4*DISP.
   Uses a two-way goto_tb when chaining is allowed, otherwise selects
   the new PC with a movcond.  */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        /* Fall-through: continue at the next insn (chain slot 0).  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken: branch target (chain slot 1).  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->base.pc_next);

        /* cpu_pc = (cmp <cond> 0) ? dest : pc_next.  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return DISAS_PC_UPDATED;
    }
}
531 
532 static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
533                                int32_t disp, int mask)
534 {
535     if (mask) {
536         TCGv tmp = tcg_temp_new();
537         DisasJumpType ret;
538 
539         tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
540         ret = gen_bcond_internal(ctx, cond, tmp, disp);
541         tcg_temp_free(tmp);
542         return ret;
543     }
544     return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
545 }
546 
/* Fold -0.0 for comparison with COND.  Alpha FP branches compare the
   raw bit pattern against zero, so -0.0 (sign bit only) must be made
   to behave like +0.0 for the conditions where that matters.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        /* Only the six conditions above are generated by callers.  */
        abort();
    }
}
578 
/* Floating-point conditional branch: canonicalize -0.0 in FA per COND,
   then branch on the resulting integer comparison with zero.  */
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}
590 
/* Floating-point conditional move: FC = (FA <cond> 0) ? FB : FC,
   with -0.0 in FA folded so it compares like +0.0 where needed.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
604 
605 #define QUAL_RM_N       0x080   /* Round mode nearest even */
606 #define QUAL_RM_C       0x000   /* Round mode chopped */
607 #define QUAL_RM_M       0x040   /* Round mode minus infinity */
608 #define QUAL_RM_D       0x0c0   /* Round mode dynamic */
609 #define QUAL_RM_MASK    0x0c0
610 
611 #define QUAL_U          0x100   /* Underflow enable (fp output) */
612 #define QUAL_V          0x100   /* Overflow enable (int output) */
613 #define QUAL_S          0x400   /* Software completion enable */
614 #define QUAL_I          0x200   /* Inexact detection enable */
615 
/* Emit code to set the softfloat rounding mode according to the FN11
   rounding qualifier.  The last mode set is cached in ctx->tb_rm so
   consecutive insns with the same qualifier emit nothing.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: take the mode from the FPCR at run time.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
655 
/* Emit code to set the softfloat flush-to-zero flag according to the
   FN11 underflow qualifier, cached in ctx->tb_ftz like tb_rm above.  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
685 
/* Load fp register REG as an IEEE input operand.  Unless software
   completion (QUAL_S) is requested, the ieee_input helpers validate
   the value (IS_CMP selects the comparison variant).  $f31 reads as
   zero with no validation.  */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
711 
/* Raise any accumulated IEEE exceptions for the insn that wrote fp
   register RC, masking out traps that FN11's qualifiers disable.  */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_const_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(ign);
}
746 
/* CVTLQ: convert a longword in fp-register format (value split across
   bits <63:62> and <58:29>) to a sign-extended quadword.  */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
759 
/* Common code for two-operand IEEE operations: apply the FN11
   rounding/flush qualifiers, validate the input, call HELPER, and
   raise any enabled fp exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
774 
/* Expand gen_<name> wrappers that route two-operand IEEE insns
   through gen_ieee_arith2 with the matching helper.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
785 
/* CVTTQ: convert T-float to quadword integer.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
804 
805 static void gen_ieee_intcvt(DisasContext *ctx,
806                             void (*helper)(TCGv, TCGv_ptr, TCGv),
807 			    int rb, int rc, int fn11)
808 {
809     TCGv vb, vc;
810 
811     gen_qual_roundmode(ctx, fn11);
812     vb = load_fpr(ctx, rb);
813     vc = dest_fpr(ctx, rc);
814 
815     /* The only exception that can be raised by integer conversion
816        is inexact.  Thus we only need to worry about exceptions when
817        inexact handling is requested.  */
818     if (fn11 & QUAL_I) {
819         helper(vc, cpu_env, vb);
820         gen_fp_exc_raise(rc, fn11);
821     } else {
822         helper(vc, cpu_env, vb);
823     }
824 }
825 
/* Expand gen_<name> wrappers for the integer-to-float conversions.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
834 
/* Merge two fp values bytewise: VC = (VA & MASK) | (VB & ~MASK),
   with VA complemented first when INV_A.  Building block for the
   CPYS-family sign-copy insns.  */
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
852 
/* Common code for three-operand IEEE arithmetic: apply the FN11
   qualifiers, validate both inputs, call HELPER, raise exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
869 
/* Expand gen_<name> wrappers for the three-operand IEEE arithmetic
   insns (S- and T-float add/sub/mul/div).  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
884 
/* Common code for IEEE comparisons: validate both inputs with the
   comparison variant of the input check (no rounding/flush needed),
   call HELPER, and raise any enabled exceptions.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
898 
/* Expand gen_<name> wrappers for the T-float comparison insns.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
909 
/* Expand the 8-bit ZAPNOT byte-select literal LIT into a 64-bit mask:
   bit i of LIT set means byte i of the mask is 0xff.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int byte;

    /* Build from the top byte down so each iteration is one shift.  */
    for (byte = 7; byte >= 0; --byte) {
        mask <<= 8;
        if (lit & (1u << byte)) {
            mask |= 0xff;
        }
    }
    return mask;
}
922 
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    /* The common masks map onto cheaper dedicated TCG ops.  */
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
949 
/* EXTWH, EXTLH, EXTQH */
/* Extract the high part: shift VA left by 64 - 8*(B mod 8) bits
   (mod 64), then apply BYTE_MASK.  With a literal shift the whole
   operation folds into a single deposit.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            /* The shift moves everything of interest out of range.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count is (-8*B) mod 64.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
972 
/* EXTBL, EXTWL, EXTLL, EXTQL */
/* Extract the low part: shift VA right by 8*(B mod 8) bits, then
   apply BYTE_MASK.  With a literal shift this is a single extract,
   clamped so pos+len stays within 64 bits.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
993 
/* INSWH, INSLH, INSQH */
/* Insert high part: the zapped bytes of VA shifted right by
   64 - 8*(B mod 8) bits, i.e. the bytes that spill past bit 63 of
   the corresponding INSxL.  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* B mod 8 == 0: nothing spills into the high part.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
1031 
1032 /* INSBL, INSWL, INSLL, INSQL */
1033 static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1034                       uint8_t lit, uint8_t byte_mask)
1035 {
1036     if (islit) {
1037         int pos = (lit & 7) * 8;
1038         int len = cto32(byte_mask) * 8;
1039         if (pos + len > 64) {
1040             len = 64 - pos;
1041         }
1042         tcg_gen_deposit_z_i64(vc, va, pos, len);
1043     } else {
1044         TCGv tmp = tcg_temp_new();
1045         TCGv shift = tcg_temp_new();
1046 
1047         /* The instruction description has us left-shift the byte mask
1048            and extract bits <15:8> and apply that zap at the end.  This
1049            is equivalent to simply performing the zap first and shifting
1050            afterward.  */
1051         gen_zapnoti(tmp, va, byte_mask);
1052 
1053         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1054         tcg_gen_shli_i64(shift, shift, 3);
1055         tcg_gen_shl_i64(vc, tmp, shift);
1056         tcg_temp_free(shift);
1057         tcg_temp_free(tmp);
1058     }
1059 }
1060 
/* MSKWH, MSKLH, MSKQH */
/* Mask-high: clear in VA the bytes that the shifted byte mask would
   cover in the next-higher quadword, writing the result to VC.  */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        /* Constant shift count: shifting byte_mask left and taking
           bits <15:8> selects the bytes to clear; the complement
           keeps the rest, folding everything into one zapnot.  */
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        /* Expand the byte mask to a full bit mask, then shift it down
           in two parts as described above.  */
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        /* Clear the masked bytes of VA.  */
        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1093 
1094 /* MSKBL, MSKWL, MSKLL, MSKQL */
1095 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1096                       uint8_t lit, uint8_t byte_mask)
1097 {
1098     if (islit) {
1099         gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1100     } else {
1101         TCGv shift = tcg_temp_new();
1102         TCGv mask = tcg_temp_new();
1103 
1104         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1105         tcg_gen_shli_i64(shift, shift, 3);
1106         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1107         tcg_gen_shl_i64(mask, mask, shift);
1108 
1109         tcg_gen_andc_i64(vc, va, mask);
1110 
1111         tcg_temp_free(mask);
1112         tcg_temp_free(shift);
1113     }
1114 }
1115 
1116 static void gen_rx(DisasContext *ctx, int ra, int set)
1117 {
1118     TCGv tmp;
1119 
1120     if (ra != 31) {
1121         ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1122     }
1123 
1124     tmp = tcg_const_i64(set);
1125     st_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
1126     tcg_temp_free(tmp);
1127 }
1128 
/* Emit code for a CALL_PAL instruction with function code PALCODE.
   Trivial PALcode operations are inlined directly; anything else
   vectors to the real PALcode image via do_call_pal.  The return
   value describes how (or whether) the current TB ends.  */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            /* Keep bit 7 set so do_call_pal selects the unprivileged
               entry-point region below.  */
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            /* Read the cpu index straight out of the CPUState that
               contains this CPUAlphaState.  */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            /* Mark the cpu halted and raise EXCP_HALTED to wait for
               the next interrupt.  */
            {
                TCGv_i32 tmp = tcg_const_i32(1);
                tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                             offsetof(CPUState, halted));
                tcg_temp_free_i32(tmp);
            }
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            /* Privileged entries use only the low 6 bits.  */
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    /* User-mode emulation delivers CALL_PAL as an exception.  */
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* Record the return address in exc_addr, with bit 0 noting
           whether we were already in PAL mode; otherwise enter PAL
           mode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        /* Compute the entry point: unprivileged vectors start at
           palbr + 0x2000, privileged at palbr + 0x1000, with 64 bytes
           per vector.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, entry);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            return DISAS_NORETURN;
        } else {
            tcg_gen_movi_i64(cpu_pc, entry);
            return DISAS_PC_UPDATED;
        }
    }
#endif
}
1285 
1286 #ifndef CONFIG_USER_ONLY
1287 
1288 #define PR_LONG         0x200000
1289 
1290 static int cpu_pr_data(int pr)
1291 {
1292     switch (pr) {
1293     case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1294     case  3: return offsetof(CPUAlphaState, trap_arg0);
1295     case  4: return offsetof(CPUAlphaState, trap_arg1);
1296     case  5: return offsetof(CPUAlphaState, trap_arg2);
1297     case  6: return offsetof(CPUAlphaState, exc_addr);
1298     case  7: return offsetof(CPUAlphaState, palbr);
1299     case  8: return offsetof(CPUAlphaState, ptbr);
1300     case  9: return offsetof(CPUAlphaState, vptptr);
1301     case 10: return offsetof(CPUAlphaState, unique);
1302     case 11: return offsetof(CPUAlphaState, sysval);
1303     case 12: return offsetof(CPUAlphaState, usp);
1304 
1305     case 40 ... 63:
1306         return offsetof(CPUAlphaState, scratch[pr - 40]);
1307 
1308     case 251:
1309         return offsetof(CPUAlphaState, alarm_expire);
1310     }
1311     return 0;
1312 }
1313 
/* MFPR: read internal processor register REGNO into VA.  Returns
   DISAS_PC_STALE when icount is in use and a time source was read;
   DISAS_NEXT otherwise.  */
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        /* These helpers read time sources; under icount that counts as
           an I/O operation, so bracket the call with gen_io_start/end
           and end the TB afterward.  */
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            /* 32-bit field: load with sign extension.  */
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
1365 
/* MTPR: write VB to internal processor register REGNO.  Registers whose
   write has side effects on translation (HALT, PALBR) end the TB via
   DISAS_PC_STALE; WAIT raises EXCP_HALTED; everything else continues
   with DISAS_NEXT.  */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Mark the cpu halted and raise EXCP_HALTED to stop execution
           until the next interrupt.  */
        {
            TCGv_i32 tmp = tcg_const_i32(1);
            tcg_gen_st_i32(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                         offsetof(CPUState, halted));
            tcg_temp_free_i32(tmp);
        }
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                /* 32-bit field: store the low half only.  */
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return DISAS_NEXT;
}
1439 #endif /* !USER_ONLY*/
1440 
/* Reject the instruction if the encoding used the literal form
   (bit 12 of the original instruction was set).  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction if this CPU's amask does not include the
   AMASK_<FLAG> feature bit.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless the given bit is set in the TB's
   flags.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Reject the instruction unless the given register field is R31.  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
1468 
1469 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1470 {
1471     int32_t disp21, disp16, disp12 __attribute__((unused));
1472     uint16_t fn11;
1473     uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1474     bool islit, real_islit;
1475     TCGv va, vb, vc, tmp, tmp2;
1476     TCGv_i32 t32;
1477     DisasJumpType ret;
1478 
1479     /* Decode all instruction fields */
1480     opc = extract32(insn, 26, 6);
1481     ra = extract32(insn, 21, 5);
1482     rb = extract32(insn, 16, 5);
1483     rc = extract32(insn, 0, 5);
1484     real_islit = islit = extract32(insn, 12, 1);
1485     lit = extract32(insn, 13, 8);
1486 
1487     disp21 = sextract32(insn, 0, 21);
1488     disp16 = sextract32(insn, 0, 16);
1489     disp12 = sextract32(insn, 0, 12);
1490 
1491     fn11 = extract32(insn, 5, 11);
1492     fpfn = extract32(insn, 5, 6);
1493     fn7 = extract32(insn, 5, 7);
1494 
1495     if (rb == 31 && !islit) {
1496         islit = true;
1497         lit = 0;
1498     }
1499 
1500     ret = DISAS_NEXT;
1501     switch (opc) {
1502     case 0x00:
1503         /* CALL_PAL */
1504         ret = gen_call_pal(ctx, insn & 0x03ffffff);
1505         break;
1506     case 0x01:
1507         /* OPC01 */
1508         goto invalid_opc;
1509     case 0x02:
1510         /* OPC02 */
1511         goto invalid_opc;
1512     case 0x03:
1513         /* OPC03 */
1514         goto invalid_opc;
1515     case 0x04:
1516         /* OPC04 */
1517         goto invalid_opc;
1518     case 0x05:
1519         /* OPC05 */
1520         goto invalid_opc;
1521     case 0x06:
1522         /* OPC06 */
1523         goto invalid_opc;
1524     case 0x07:
1525         /* OPC07 */
1526         goto invalid_opc;
1527 
1528     case 0x09:
1529         /* LDAH */
1530         disp16 = (uint32_t)disp16 << 16;
1531         /* fall through */
1532     case 0x08:
1533         /* LDA */
1534         va = dest_gpr(ctx, ra);
1535         /* It's worth special-casing immediate loads.  */
1536         if (rb == 31) {
1537             tcg_gen_movi_i64(va, disp16);
1538         } else {
1539             tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1540         }
1541         break;
1542 
1543     case 0x0A:
1544         /* LDBU */
1545         REQUIRE_AMASK(BWX);
1546         gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
1547         break;
1548     case 0x0B:
1549         /* LDQ_U */
1550         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
1551         break;
1552     case 0x0C:
1553         /* LDWU */
1554         REQUIRE_AMASK(BWX);
1555         gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
1556         break;
1557     case 0x0D:
1558         /* STW */
1559         REQUIRE_AMASK(BWX);
1560         gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
1561         break;
1562     case 0x0E:
1563         /* STB */
1564         REQUIRE_AMASK(BWX);
1565         gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
1566         break;
1567     case 0x0F:
1568         /* STQ_U */
1569         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
1570         break;
1571 
1572     case 0x10:
1573         vc = dest_gpr(ctx, rc);
1574         vb = load_gpr_lit(ctx, rb, lit, islit);
1575 
1576         if (ra == 31) {
1577             if (fn7 == 0x00) {
1578                 /* Special case ADDL as SEXTL.  */
1579                 tcg_gen_ext32s_i64(vc, vb);
1580                 break;
1581             }
1582             if (fn7 == 0x29) {
1583                 /* Special case SUBQ as NEGQ.  */
1584                 tcg_gen_neg_i64(vc, vb);
1585                 break;
1586             }
1587         }
1588 
1589         va = load_gpr(ctx, ra);
1590         switch (fn7) {
1591         case 0x00:
1592             /* ADDL */
1593             tcg_gen_add_i64(vc, va, vb);
1594             tcg_gen_ext32s_i64(vc, vc);
1595             break;
1596         case 0x02:
1597             /* S4ADDL */
1598             tmp = tcg_temp_new();
1599             tcg_gen_shli_i64(tmp, va, 2);
1600             tcg_gen_add_i64(tmp, tmp, vb);
1601             tcg_gen_ext32s_i64(vc, tmp);
1602             tcg_temp_free(tmp);
1603             break;
1604         case 0x09:
1605             /* SUBL */
1606             tcg_gen_sub_i64(vc, va, vb);
1607             tcg_gen_ext32s_i64(vc, vc);
1608             break;
1609         case 0x0B:
1610             /* S4SUBL */
1611             tmp = tcg_temp_new();
1612             tcg_gen_shli_i64(tmp, va, 2);
1613             tcg_gen_sub_i64(tmp, tmp, vb);
1614             tcg_gen_ext32s_i64(vc, tmp);
1615             tcg_temp_free(tmp);
1616             break;
1617         case 0x0F:
1618             /* CMPBGE */
1619             if (ra == 31) {
1620                 /* Special case 0 >= X as X == 0.  */
1621                 gen_helper_cmpbe0(vc, vb);
1622             } else {
1623                 gen_helper_cmpbge(vc, va, vb);
1624             }
1625             break;
1626         case 0x12:
1627             /* S8ADDL */
1628             tmp = tcg_temp_new();
1629             tcg_gen_shli_i64(tmp, va, 3);
1630             tcg_gen_add_i64(tmp, tmp, vb);
1631             tcg_gen_ext32s_i64(vc, tmp);
1632             tcg_temp_free(tmp);
1633             break;
1634         case 0x1B:
1635             /* S8SUBL */
1636             tmp = tcg_temp_new();
1637             tcg_gen_shli_i64(tmp, va, 3);
1638             tcg_gen_sub_i64(tmp, tmp, vb);
1639             tcg_gen_ext32s_i64(vc, tmp);
1640             tcg_temp_free(tmp);
1641             break;
1642         case 0x1D:
1643             /* CMPULT */
1644             tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1645             break;
1646         case 0x20:
1647             /* ADDQ */
1648             tcg_gen_add_i64(vc, va, vb);
1649             break;
1650         case 0x22:
1651             /* S4ADDQ */
1652             tmp = tcg_temp_new();
1653             tcg_gen_shli_i64(tmp, va, 2);
1654             tcg_gen_add_i64(vc, tmp, vb);
1655             tcg_temp_free(tmp);
1656             break;
1657         case 0x29:
1658             /* SUBQ */
1659             tcg_gen_sub_i64(vc, va, vb);
1660             break;
1661         case 0x2B:
1662             /* S4SUBQ */
1663             tmp = tcg_temp_new();
1664             tcg_gen_shli_i64(tmp, va, 2);
1665             tcg_gen_sub_i64(vc, tmp, vb);
1666             tcg_temp_free(tmp);
1667             break;
1668         case 0x2D:
1669             /* CMPEQ */
1670             tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1671             break;
1672         case 0x32:
1673             /* S8ADDQ */
1674             tmp = tcg_temp_new();
1675             tcg_gen_shli_i64(tmp, va, 3);
1676             tcg_gen_add_i64(vc, tmp, vb);
1677             tcg_temp_free(tmp);
1678             break;
1679         case 0x3B:
1680             /* S8SUBQ */
1681             tmp = tcg_temp_new();
1682             tcg_gen_shli_i64(tmp, va, 3);
1683             tcg_gen_sub_i64(vc, tmp, vb);
1684             tcg_temp_free(tmp);
1685             break;
1686         case 0x3D:
1687             /* CMPULE */
1688             tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1689             break;
1690         case 0x40:
1691             /* ADDL/V */
1692             tmp = tcg_temp_new();
1693             tcg_gen_ext32s_i64(tmp, va);
1694             tcg_gen_ext32s_i64(vc, vb);
1695             tcg_gen_add_i64(tmp, tmp, vc);
1696             tcg_gen_ext32s_i64(vc, tmp);
1697             gen_helper_check_overflow(cpu_env, vc, tmp);
1698             tcg_temp_free(tmp);
1699             break;
1700         case 0x49:
1701             /* SUBL/V */
1702             tmp = tcg_temp_new();
1703             tcg_gen_ext32s_i64(tmp, va);
1704             tcg_gen_ext32s_i64(vc, vb);
1705             tcg_gen_sub_i64(tmp, tmp, vc);
1706             tcg_gen_ext32s_i64(vc, tmp);
1707             gen_helper_check_overflow(cpu_env, vc, tmp);
1708             tcg_temp_free(tmp);
1709             break;
1710         case 0x4D:
1711             /* CMPLT */
1712             tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1713             break;
1714         case 0x60:
1715             /* ADDQ/V */
1716             tmp = tcg_temp_new();
1717             tmp2 = tcg_temp_new();
1718             tcg_gen_eqv_i64(tmp, va, vb);
1719             tcg_gen_mov_i64(tmp2, va);
1720             tcg_gen_add_i64(vc, va, vb);
1721             tcg_gen_xor_i64(tmp2, tmp2, vc);
1722             tcg_gen_and_i64(tmp, tmp, tmp2);
1723             tcg_gen_shri_i64(tmp, tmp, 63);
1724             tcg_gen_movi_i64(tmp2, 0);
1725             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1726             tcg_temp_free(tmp);
1727             tcg_temp_free(tmp2);
1728             break;
1729         case 0x69:
1730             /* SUBQ/V */
1731             tmp = tcg_temp_new();
1732             tmp2 = tcg_temp_new();
1733             tcg_gen_xor_i64(tmp, va, vb);
1734             tcg_gen_mov_i64(tmp2, va);
1735             tcg_gen_sub_i64(vc, va, vb);
1736             tcg_gen_xor_i64(tmp2, tmp2, vc);
1737             tcg_gen_and_i64(tmp, tmp, tmp2);
1738             tcg_gen_shri_i64(tmp, tmp, 63);
1739             tcg_gen_movi_i64(tmp2, 0);
1740             gen_helper_check_overflow(cpu_env, tmp, tmp2);
1741             tcg_temp_free(tmp);
1742             tcg_temp_free(tmp2);
1743             break;
1744         case 0x6D:
1745             /* CMPLE */
1746             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1747             break;
1748         default:
1749             goto invalid_opc;
1750         }
1751         break;
1752 
1753     case 0x11:
1754         if (fn7 == 0x20) {
1755             if (rc == 31) {
1756                 /* Special case BIS as NOP.  */
1757                 break;
1758             }
1759             if (ra == 31) {
1760                 /* Special case BIS as MOV.  */
1761                 vc = dest_gpr(ctx, rc);
1762                 if (islit) {
1763                     tcg_gen_movi_i64(vc, lit);
1764                 } else {
1765                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1766                 }
1767                 break;
1768             }
1769         }
1770 
1771         vc = dest_gpr(ctx, rc);
1772         vb = load_gpr_lit(ctx, rb, lit, islit);
1773 
1774         if (fn7 == 0x28 && ra == 31) {
1775             /* Special case ORNOT as NOT.  */
1776             tcg_gen_not_i64(vc, vb);
1777             break;
1778         }
1779 
1780         va = load_gpr(ctx, ra);
1781         switch (fn7) {
1782         case 0x00:
1783             /* AND */
1784             tcg_gen_and_i64(vc, va, vb);
1785             break;
1786         case 0x08:
1787             /* BIC */
1788             tcg_gen_andc_i64(vc, va, vb);
1789             break;
1790         case 0x14:
1791             /* CMOVLBS */
1792             tmp = tcg_temp_new();
1793             tcg_gen_andi_i64(tmp, va, 1);
1794             tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
1795                                 vb, load_gpr(ctx, rc));
1796             tcg_temp_free(tmp);
1797             break;
1798         case 0x16:
1799             /* CMOVLBC */
1800             tmp = tcg_temp_new();
1801             tcg_gen_andi_i64(tmp, va, 1);
1802             tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
1803                                 vb, load_gpr(ctx, rc));
1804             tcg_temp_free(tmp);
1805             break;
1806         case 0x20:
1807             /* BIS */
1808             tcg_gen_or_i64(vc, va, vb);
1809             break;
1810         case 0x24:
1811             /* CMOVEQ */
1812             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1813                                 vb, load_gpr(ctx, rc));
1814             break;
1815         case 0x26:
1816             /* CMOVNE */
1817             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1818                                 vb, load_gpr(ctx, rc));
1819             break;
1820         case 0x28:
1821             /* ORNOT */
1822             tcg_gen_orc_i64(vc, va, vb);
1823             break;
1824         case 0x40:
1825             /* XOR */
1826             tcg_gen_xor_i64(vc, va, vb);
1827             break;
1828         case 0x44:
1829             /* CMOVLT */
1830             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1831                                 vb, load_gpr(ctx, rc));
1832             break;
1833         case 0x46:
1834             /* CMOVGE */
1835             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1836                                 vb, load_gpr(ctx, rc));
1837             break;
1838         case 0x48:
1839             /* EQV */
1840             tcg_gen_eqv_i64(vc, va, vb);
1841             break;
1842         case 0x61:
1843             /* AMASK */
1844             REQUIRE_REG_31(ra);
1845             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1846             break;
1847         case 0x64:
1848             /* CMOVLE */
1849             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1850                                 vb, load_gpr(ctx, rc));
1851             break;
1852         case 0x66:
1853             /* CMOVGT */
1854             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1855                                 vb, load_gpr(ctx, rc));
1856             break;
1857         case 0x6C:
1858             /* IMPLVER */
1859             REQUIRE_REG_31(ra);
1860             tcg_gen_movi_i64(vc, ctx->implver);
1861             break;
1862         default:
1863             goto invalid_opc;
1864         }
1865         break;
1866 
1867     case 0x12:
1868         vc = dest_gpr(ctx, rc);
1869         va = load_gpr(ctx, ra);
1870         switch (fn7) {
1871         case 0x02:
1872             /* MSKBL */
1873             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1874             break;
1875         case 0x06:
1876             /* EXTBL */
1877             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1878             break;
1879         case 0x0B:
1880             /* INSBL */
1881             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1882             break;
1883         case 0x12:
1884             /* MSKWL */
1885             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1886             break;
1887         case 0x16:
1888             /* EXTWL */
1889             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1890             break;
1891         case 0x1B:
1892             /* INSWL */
1893             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1894             break;
1895         case 0x22:
1896             /* MSKLL */
1897             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1898             break;
1899         case 0x26:
1900             /* EXTLL */
1901             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1902             break;
1903         case 0x2B:
1904             /* INSLL */
1905             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1906             break;
1907         case 0x30:
1908             /* ZAP */
1909             if (islit) {
1910                 gen_zapnoti(vc, va, ~lit);
1911             } else {
1912                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1913             }
1914             break;
1915         case 0x31:
1916             /* ZAPNOT */
1917             if (islit) {
1918                 gen_zapnoti(vc, va, lit);
1919             } else {
1920                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1921             }
1922             break;
1923         case 0x32:
1924             /* MSKQL */
1925             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1926             break;
1927         case 0x34:
1928             /* SRL */
1929             if (islit) {
1930                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1931             } else {
1932                 tmp = tcg_temp_new();
1933                 vb = load_gpr(ctx, rb);
1934                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1935                 tcg_gen_shr_i64(vc, va, tmp);
1936                 tcg_temp_free(tmp);
1937             }
1938             break;
1939         case 0x36:
1940             /* EXTQL */
1941             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1942             break;
1943         case 0x39:
1944             /* SLL */
1945             if (islit) {
1946                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1947             } else {
1948                 tmp = tcg_temp_new();
1949                 vb = load_gpr(ctx, rb);
1950                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1951                 tcg_gen_shl_i64(vc, va, tmp);
1952                 tcg_temp_free(tmp);
1953             }
1954             break;
1955         case 0x3B:
1956             /* INSQL */
1957             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1958             break;
1959         case 0x3C:
1960             /* SRA */
1961             if (islit) {
1962                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1963             } else {
1964                 tmp = tcg_temp_new();
1965                 vb = load_gpr(ctx, rb);
1966                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1967                 tcg_gen_sar_i64(vc, va, tmp);
1968                 tcg_temp_free(tmp);
1969             }
1970             break;
1971         case 0x52:
1972             /* MSKWH */
1973             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1974             break;
1975         case 0x57:
1976             /* INSWH */
1977             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1978             break;
1979         case 0x5A:
1980             /* EXTWH */
1981             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1982             break;
1983         case 0x62:
1984             /* MSKLH */
1985             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1986             break;
1987         case 0x67:
1988             /* INSLH */
1989             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1990             break;
1991         case 0x6A:
1992             /* EXTLH */
1993             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1994             break;
1995         case 0x72:
1996             /* MSKQH */
1997             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1998             break;
1999         case 0x77:
2000             /* INSQH */
2001             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
2002             break;
2003         case 0x7A:
2004             /* EXTQH */
2005             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
2006             break;
2007         default:
2008             goto invalid_opc;
2009         }
2010         break;
2011 
2012     case 0x13:
2013         vc = dest_gpr(ctx, rc);
2014         vb = load_gpr_lit(ctx, rb, lit, islit);
2015         va = load_gpr(ctx, ra);
2016         switch (fn7) {
2017         case 0x00:
2018             /* MULL */
2019             tcg_gen_mul_i64(vc, va, vb);
2020             tcg_gen_ext32s_i64(vc, vc);
2021             break;
2022         case 0x20:
2023             /* MULQ */
2024             tcg_gen_mul_i64(vc, va, vb);
2025             break;
2026         case 0x30:
2027             /* UMULH */
2028             tmp = tcg_temp_new();
2029             tcg_gen_mulu2_i64(tmp, vc, va, vb);
2030             tcg_temp_free(tmp);
2031             break;
2032         case 0x40:
2033             /* MULL/V */
2034             tmp = tcg_temp_new();
2035             tcg_gen_ext32s_i64(tmp, va);
2036             tcg_gen_ext32s_i64(vc, vb);
2037             tcg_gen_mul_i64(tmp, tmp, vc);
2038             tcg_gen_ext32s_i64(vc, tmp);
2039             gen_helper_check_overflow(cpu_env, vc, tmp);
2040             tcg_temp_free(tmp);
2041             break;
2042         case 0x60:
2043             /* MULQ/V */
2044             tmp = tcg_temp_new();
2045             tmp2 = tcg_temp_new();
2046             tcg_gen_muls2_i64(vc, tmp, va, vb);
2047             tcg_gen_sari_i64(tmp2, vc, 63);
2048             gen_helper_check_overflow(cpu_env, tmp, tmp2);
2049             tcg_temp_free(tmp);
2050             tcg_temp_free(tmp2);
2051             break;
2052         default:
2053             goto invalid_opc;
2054         }
2055         break;
2056 
2057     case 0x14:
2058         REQUIRE_AMASK(FIX);
2059         vc = dest_fpr(ctx, rc);
2060         switch (fpfn) { /* fn11 & 0x3F */
2061         case 0x04:
2062             /* ITOFS */
2063             REQUIRE_REG_31(rb);
2064             t32 = tcg_temp_new_i32();
2065             va = load_gpr(ctx, ra);
2066             tcg_gen_extrl_i64_i32(t32, va);
2067             gen_helper_memory_to_s(vc, t32);
2068             tcg_temp_free_i32(t32);
2069             break;
2070         case 0x0A:
2071             /* SQRTF */
2072             REQUIRE_REG_31(ra);
2073             vb = load_fpr(ctx, rb);
2074             gen_helper_sqrtf(vc, cpu_env, vb);
2075             break;
2076         case 0x0B:
2077             /* SQRTS */
2078             REQUIRE_REG_31(ra);
2079             gen_sqrts(ctx, rb, rc, fn11);
2080             break;
2081         case 0x14:
2082             /* ITOFF */
2083             REQUIRE_REG_31(rb);
2084             t32 = tcg_temp_new_i32();
2085             va = load_gpr(ctx, ra);
2086             tcg_gen_extrl_i64_i32(t32, va);
2087             gen_helper_memory_to_f(vc, t32);
2088             tcg_temp_free_i32(t32);
2089             break;
2090         case 0x24:
2091             /* ITOFT */
2092             REQUIRE_REG_31(rb);
2093             va = load_gpr(ctx, ra);
2094             tcg_gen_mov_i64(vc, va);
2095             break;
2096         case 0x2A:
2097             /* SQRTG */
2098             REQUIRE_REG_31(ra);
2099             vb = load_fpr(ctx, rb);
2100             gen_helper_sqrtg(vc, cpu_env, vb);
2101             break;
2102         case 0x02B:
2103             /* SQRTT */
2104             REQUIRE_REG_31(ra);
2105             gen_sqrtt(ctx, rb, rc, fn11);
2106             break;
2107         default:
2108             goto invalid_opc;
2109         }
2110         break;
2111 
2112     case 0x15:
2113         /* VAX floating point */
2114         /* XXX: rounding mode and trap are ignored (!) */
2115         vc = dest_fpr(ctx, rc);
2116         vb = load_fpr(ctx, rb);
2117         va = load_fpr(ctx, ra);
2118         switch (fpfn) { /* fn11 & 0x3F */
2119         case 0x00:
2120             /* ADDF */
2121             gen_helper_addf(vc, cpu_env, va, vb);
2122             break;
2123         case 0x01:
2124             /* SUBF */
2125             gen_helper_subf(vc, cpu_env, va, vb);
2126             break;
2127         case 0x02:
2128             /* MULF */
2129             gen_helper_mulf(vc, cpu_env, va, vb);
2130             break;
2131         case 0x03:
2132             /* DIVF */
2133             gen_helper_divf(vc, cpu_env, va, vb);
2134             break;
2135         case 0x1E:
2136             /* CVTDG -- TODO */
2137             REQUIRE_REG_31(ra);
2138             goto invalid_opc;
2139         case 0x20:
2140             /* ADDG */
2141             gen_helper_addg(vc, cpu_env, va, vb);
2142             break;
2143         case 0x21:
2144             /* SUBG */
2145             gen_helper_subg(vc, cpu_env, va, vb);
2146             break;
2147         case 0x22:
2148             /* MULG */
2149             gen_helper_mulg(vc, cpu_env, va, vb);
2150             break;
2151         case 0x23:
2152             /* DIVG */
2153             gen_helper_divg(vc, cpu_env, va, vb);
2154             break;
2155         case 0x25:
2156             /* CMPGEQ */
2157             gen_helper_cmpgeq(vc, cpu_env, va, vb);
2158             break;
2159         case 0x26:
2160             /* CMPGLT */
2161             gen_helper_cmpglt(vc, cpu_env, va, vb);
2162             break;
2163         case 0x27:
2164             /* CMPGLE */
2165             gen_helper_cmpgle(vc, cpu_env, va, vb);
2166             break;
2167         case 0x2C:
2168             /* CVTGF */
2169             REQUIRE_REG_31(ra);
2170             gen_helper_cvtgf(vc, cpu_env, vb);
2171             break;
2172         case 0x2D:
2173             /* CVTGD -- TODO */
2174             REQUIRE_REG_31(ra);
2175             goto invalid_opc;
2176         case 0x2F:
2177             /* CVTGQ */
2178             REQUIRE_REG_31(ra);
2179             gen_helper_cvtgq(vc, cpu_env, vb);
2180             break;
2181         case 0x3C:
2182             /* CVTQF */
2183             REQUIRE_REG_31(ra);
2184             gen_helper_cvtqf(vc, cpu_env, vb);
2185             break;
2186         case 0x3E:
2187             /* CVTQG */
2188             REQUIRE_REG_31(ra);
2189             gen_helper_cvtqg(vc, cpu_env, vb);
2190             break;
2191         default:
2192             goto invalid_opc;
2193         }
2194         break;
2195 
2196     case 0x16:
2197         /* IEEE floating-point */
2198         switch (fpfn) { /* fn11 & 0x3F */
2199         case 0x00:
2200             /* ADDS */
2201             gen_adds(ctx, ra, rb, rc, fn11);
2202             break;
2203         case 0x01:
2204             /* SUBS */
2205             gen_subs(ctx, ra, rb, rc, fn11);
2206             break;
2207         case 0x02:
2208             /* MULS */
2209             gen_muls(ctx, ra, rb, rc, fn11);
2210             break;
2211         case 0x03:
2212             /* DIVS */
2213             gen_divs(ctx, ra, rb, rc, fn11);
2214             break;
2215         case 0x20:
2216             /* ADDT */
2217             gen_addt(ctx, ra, rb, rc, fn11);
2218             break;
2219         case 0x21:
2220             /* SUBT */
2221             gen_subt(ctx, ra, rb, rc, fn11);
2222             break;
2223         case 0x22:
2224             /* MULT */
2225             gen_mult(ctx, ra, rb, rc, fn11);
2226             break;
2227         case 0x23:
2228             /* DIVT */
2229             gen_divt(ctx, ra, rb, rc, fn11);
2230             break;
2231         case 0x24:
2232             /* CMPTUN */
2233             gen_cmptun(ctx, ra, rb, rc, fn11);
2234             break;
2235         case 0x25:
2236             /* CMPTEQ */
2237             gen_cmpteq(ctx, ra, rb, rc, fn11);
2238             break;
2239         case 0x26:
2240             /* CMPTLT */
2241             gen_cmptlt(ctx, ra, rb, rc, fn11);
2242             break;
2243         case 0x27:
2244             /* CMPTLE */
2245             gen_cmptle(ctx, ra, rb, rc, fn11);
2246             break;
2247         case 0x2C:
2248             REQUIRE_REG_31(ra);
2249             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2250                 /* CVTST */
2251                 gen_cvtst(ctx, rb, rc, fn11);
2252             } else {
2253                 /* CVTTS */
2254                 gen_cvtts(ctx, rb, rc, fn11);
2255             }
2256             break;
2257         case 0x2F:
2258             /* CVTTQ */
2259             REQUIRE_REG_31(ra);
2260             gen_cvttq(ctx, rb, rc, fn11);
2261             break;
2262         case 0x3C:
2263             /* CVTQS */
2264             REQUIRE_REG_31(ra);
2265             gen_cvtqs(ctx, rb, rc, fn11);
2266             break;
2267         case 0x3E:
2268             /* CVTQT */
2269             REQUIRE_REG_31(ra);
2270             gen_cvtqt(ctx, rb, rc, fn11);
2271             break;
2272         default:
2273             goto invalid_opc;
2274         }
2275         break;
2276 
2277     case 0x17:
2278         switch (fn11) {
2279         case 0x010:
2280             /* CVTLQ */
2281             REQUIRE_REG_31(ra);
2282             vc = dest_fpr(ctx, rc);
2283             vb = load_fpr(ctx, rb);
2284             gen_cvtlq(vc, vb);
2285             break;
2286         case 0x020:
2287             /* CPYS */
2288             if (rc == 31) {
2289                 /* Special case CPYS as FNOP.  */
2290             } else {
2291                 vc = dest_fpr(ctx, rc);
2292                 va = load_fpr(ctx, ra);
2293                 if (ra == rb) {
2294                     /* Special case CPYS as FMOV.  */
2295                     tcg_gen_mov_i64(vc, va);
2296                 } else {
2297                     vb = load_fpr(ctx, rb);
2298                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2299                 }
2300             }
2301             break;
2302         case 0x021:
2303             /* CPYSN */
2304             vc = dest_fpr(ctx, rc);
2305             vb = load_fpr(ctx, rb);
2306             va = load_fpr(ctx, ra);
2307             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2308             break;
2309         case 0x022:
2310             /* CPYSE */
2311             vc = dest_fpr(ctx, rc);
2312             vb = load_fpr(ctx, rb);
2313             va = load_fpr(ctx, ra);
2314             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2315             break;
2316         case 0x024:
2317             /* MT_FPCR */
2318             va = load_fpr(ctx, ra);
2319             gen_helper_store_fpcr(cpu_env, va);
2320             if (ctx->tb_rm == QUAL_RM_D) {
2321                 /* Re-do the copy of the rounding mode to fp_status
2322                    the next time we use dynamic rounding.  */
2323                 ctx->tb_rm = -1;
2324             }
2325             break;
2326         case 0x025:
2327             /* MF_FPCR */
2328             va = dest_fpr(ctx, ra);
2329             gen_helper_load_fpcr(va, cpu_env);
2330             break;
2331         case 0x02A:
2332             /* FCMOVEQ */
2333             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2334             break;
2335         case 0x02B:
2336             /* FCMOVNE */
2337             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2338             break;
2339         case 0x02C:
2340             /* FCMOVLT */
2341             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2342             break;
2343         case 0x02D:
2344             /* FCMOVGE */
2345             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2346             break;
2347         case 0x02E:
2348             /* FCMOVLE */
2349             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2350             break;
2351         case 0x02F:
2352             /* FCMOVGT */
2353             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2354             break;
2355         case 0x030: /* CVTQL */
2356         case 0x130: /* CVTQL/V */
2357         case 0x530: /* CVTQL/SV */
2358             REQUIRE_REG_31(ra);
2359             vc = dest_fpr(ctx, rc);
2360             vb = load_fpr(ctx, rb);
2361             gen_helper_cvtql(vc, cpu_env, vb);
2362             gen_fp_exc_raise(rc, fn11);
2363             break;
2364         default:
2365             goto invalid_opc;
2366         }
2367         break;
2368 
2369     case 0x18:
2370         switch ((uint16_t)disp16) {
2371         case 0x0000:
2372             /* TRAPB */
2373             /* No-op.  */
2374             break;
2375         case 0x0400:
2376             /* EXCB */
2377             /* No-op.  */
2378             break;
2379         case 0x4000:
2380             /* MB */
2381             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2382             break;
2383         case 0x4400:
2384             /* WMB */
2385             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2386             break;
2387         case 0x8000:
2388             /* FETCH */
2389             /* No-op */
2390             break;
2391         case 0xA000:
2392             /* FETCH_M */
2393             /* No-op */
2394             break;
2395         case 0xC000:
2396             /* RPCC */
2397             va = dest_gpr(ctx, ra);
2398             if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
2399                 gen_io_start();
2400                 gen_helper_load_pcc(va, cpu_env);
2401                 gen_io_end();
2402                 ret = DISAS_PC_STALE;
2403             } else {
2404                 gen_helper_load_pcc(va, cpu_env);
2405             }
2406             break;
2407         case 0xE000:
2408             /* RC */
2409             gen_rx(ctx, ra, 0);
2410             break;
2411         case 0xE800:
2412             /* ECB */
2413             break;
2414         case 0xF000:
2415             /* RS */
2416             gen_rx(ctx, ra, 1);
2417             break;
2418         case 0xF800:
2419             /* WH64 */
2420             /* No-op */
2421             break;
2422         case 0xFC00:
2423             /* WH64EN */
2424             /* No-op */
2425             break;
2426         default:
2427             goto invalid_opc;
2428         }
2429         break;
2430 
2431     case 0x19:
2432         /* HW_MFPR (PALcode) */
2433 #ifndef CONFIG_USER_ONLY
2434         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2435         va = dest_gpr(ctx, ra);
2436         ret = gen_mfpr(ctx, va, insn & 0xffff);
2437         break;
2438 #else
2439         goto invalid_opc;
2440 #endif
2441 
2442     case 0x1A:
2443         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2444            prediction stack action, which of course we don't implement.  */
2445         vb = load_gpr(ctx, rb);
2446         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2447         if (ra != 31) {
2448             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2449         }
2450         ret = DISAS_PC_UPDATED;
2451         break;
2452 
2453     case 0x1B:
2454         /* HW_LD (PALcode) */
2455 #ifndef CONFIG_USER_ONLY
2456         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2457         {
2458             TCGv addr = tcg_temp_new();
2459             vb = load_gpr(ctx, rb);
2460             va = dest_gpr(ctx, ra);
2461 
2462             tcg_gen_addi_i64(addr, vb, disp12);
2463             switch ((insn >> 12) & 0xF) {
2464             case 0x0:
2465                 /* Longword physical access (hw_ldl/p) */
2466                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
2467                 break;
2468             case 0x1:
2469                 /* Quadword physical access (hw_ldq/p) */
2470                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
2471                 break;
2472             case 0x2:
2473                 /* Longword physical access with lock (hw_ldl_l/p) */
2474                 gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
2475                 break;
2476             case 0x3:
2477                 /* Quadword physical access with lock (hw_ldq_l/p) */
2478                 gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
2479                 break;
2480             case 0x4:
2481                 /* Longword virtual PTE fetch (hw_ldl/v) */
2482                 goto invalid_opc;
2483             case 0x5:
2484                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2485                 goto invalid_opc;
2486                 break;
2487             case 0x6:
2488                 /* Invalid */
2489                 goto invalid_opc;
2490             case 0x7:
2491                 /* Invalid */
2492                 goto invalid_opc;
2493             case 0x8:
2494                 /* Longword virtual access (hw_ldl) */
2495                 goto invalid_opc;
2496             case 0x9:
2497                 /* Quadword virtual access (hw_ldq) */
2498                 goto invalid_opc;
2499             case 0xA:
2500                 /* Longword virtual access with protection check (hw_ldl/w) */
2501                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
2502                 break;
2503             case 0xB:
2504                 /* Quadword virtual access with protection check (hw_ldq/w) */
2505                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
2506                 break;
2507             case 0xC:
2508                 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2509                 goto invalid_opc;
2510             case 0xD:
2511                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2512                 goto invalid_opc;
2513             case 0xE:
2514                 /* Longword virtual access with alternate access mode and
2515                    protection checks (hw_ldl/wa) */
2516                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
2517                 break;
2518             case 0xF:
2519                 /* Quadword virtual access with alternate access mode and
2520                    protection checks (hw_ldq/wa) */
2521                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
2522                 break;
2523             }
2524             tcg_temp_free(addr);
2525             break;
2526         }
2527 #else
2528         goto invalid_opc;
2529 #endif
2530 
2531     case 0x1C:
2532         vc = dest_gpr(ctx, rc);
2533         if (fn7 == 0x70) {
2534             /* FTOIT */
2535             REQUIRE_AMASK(FIX);
2536             REQUIRE_REG_31(rb);
2537             va = load_fpr(ctx, ra);
2538             tcg_gen_mov_i64(vc, va);
2539             break;
2540         } else if (fn7 == 0x78) {
2541             /* FTOIS */
2542             REQUIRE_AMASK(FIX);
2543             REQUIRE_REG_31(rb);
2544             t32 = tcg_temp_new_i32();
2545             va = load_fpr(ctx, ra);
2546             gen_helper_s_to_memory(t32, va);
2547             tcg_gen_ext_i32_i64(vc, t32);
2548             tcg_temp_free_i32(t32);
2549             break;
2550         }
2551 
2552         vb = load_gpr_lit(ctx, rb, lit, islit);
2553         switch (fn7) {
2554         case 0x00:
2555             /* SEXTB */
2556             REQUIRE_AMASK(BWX);
2557             REQUIRE_REG_31(ra);
2558             tcg_gen_ext8s_i64(vc, vb);
2559             break;
2560         case 0x01:
2561             /* SEXTW */
2562             REQUIRE_AMASK(BWX);
2563             REQUIRE_REG_31(ra);
2564             tcg_gen_ext16s_i64(vc, vb);
2565             break;
2566         case 0x30:
2567             /* CTPOP */
2568             REQUIRE_AMASK(CIX);
2569             REQUIRE_REG_31(ra);
2570             REQUIRE_NO_LIT;
2571             tcg_gen_ctpop_i64(vc, vb);
2572             break;
2573         case 0x31:
2574             /* PERR */
2575             REQUIRE_AMASK(MVI);
2576             REQUIRE_NO_LIT;
2577             va = load_gpr(ctx, ra);
2578             gen_helper_perr(vc, va, vb);
2579             break;
2580         case 0x32:
2581             /* CTLZ */
2582             REQUIRE_AMASK(CIX);
2583             REQUIRE_REG_31(ra);
2584             REQUIRE_NO_LIT;
2585             tcg_gen_clzi_i64(vc, vb, 64);
2586             break;
2587         case 0x33:
2588             /* CTTZ */
2589             REQUIRE_AMASK(CIX);
2590             REQUIRE_REG_31(ra);
2591             REQUIRE_NO_LIT;
2592             tcg_gen_ctzi_i64(vc, vb, 64);
2593             break;
2594         case 0x34:
2595             /* UNPKBW */
2596             REQUIRE_AMASK(MVI);
2597             REQUIRE_REG_31(ra);
2598             REQUIRE_NO_LIT;
2599             gen_helper_unpkbw(vc, vb);
2600             break;
2601         case 0x35:
2602             /* UNPKBL */
2603             REQUIRE_AMASK(MVI);
2604             REQUIRE_REG_31(ra);
2605             REQUIRE_NO_LIT;
2606             gen_helper_unpkbl(vc, vb);
2607             break;
2608         case 0x36:
2609             /* PKWB */
2610             REQUIRE_AMASK(MVI);
2611             REQUIRE_REG_31(ra);
2612             REQUIRE_NO_LIT;
2613             gen_helper_pkwb(vc, vb);
2614             break;
2615         case 0x37:
2616             /* PKLB */
2617             REQUIRE_AMASK(MVI);
2618             REQUIRE_REG_31(ra);
2619             REQUIRE_NO_LIT;
2620             gen_helper_pklb(vc, vb);
2621             break;
2622         case 0x38:
2623             /* MINSB8 */
2624             REQUIRE_AMASK(MVI);
2625             va = load_gpr(ctx, ra);
2626             gen_helper_minsb8(vc, va, vb);
2627             break;
2628         case 0x39:
2629             /* MINSW4 */
2630             REQUIRE_AMASK(MVI);
2631             va = load_gpr(ctx, ra);
2632             gen_helper_minsw4(vc, va, vb);
2633             break;
2634         case 0x3A:
2635             /* MINUB8 */
2636             REQUIRE_AMASK(MVI);
2637             va = load_gpr(ctx, ra);
2638             gen_helper_minub8(vc, va, vb);
2639             break;
2640         case 0x3B:
2641             /* MINUW4 */
2642             REQUIRE_AMASK(MVI);
2643             va = load_gpr(ctx, ra);
2644             gen_helper_minuw4(vc, va, vb);
2645             break;
2646         case 0x3C:
2647             /* MAXUB8 */
2648             REQUIRE_AMASK(MVI);
2649             va = load_gpr(ctx, ra);
2650             gen_helper_maxub8(vc, va, vb);
2651             break;
2652         case 0x3D:
2653             /* MAXUW4 */
2654             REQUIRE_AMASK(MVI);
2655             va = load_gpr(ctx, ra);
2656             gen_helper_maxuw4(vc, va, vb);
2657             break;
2658         case 0x3E:
2659             /* MAXSB8 */
2660             REQUIRE_AMASK(MVI);
2661             va = load_gpr(ctx, ra);
2662             gen_helper_maxsb8(vc, va, vb);
2663             break;
2664         case 0x3F:
2665             /* MAXSW4 */
2666             REQUIRE_AMASK(MVI);
2667             va = load_gpr(ctx, ra);
2668             gen_helper_maxsw4(vc, va, vb);
2669             break;
2670         default:
2671             goto invalid_opc;
2672         }
2673         break;
2674 
2675     case 0x1D:
2676         /* HW_MTPR (PALcode) */
2677 #ifndef CONFIG_USER_ONLY
2678         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2679         vb = load_gpr(ctx, rb);
2680         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2681         break;
2682 #else
2683         goto invalid_opc;
2684 #endif
2685 
2686     case 0x1E:
2687         /* HW_RET (PALcode) */
2688 #ifndef CONFIG_USER_ONLY
2689         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2690         if (rb == 31) {
2691             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2692                address from EXC_ADDR.  This turns out to be useful for our
2693                emulation PALcode, so continue to accept it.  */
2694             ctx->lit = vb = tcg_temp_new();
2695             tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
2696         } else {
2697             vb = load_gpr(ctx, rb);
2698         }
2699         tcg_gen_movi_i64(cpu_lock_addr, -1);
2700         tmp = tcg_temp_new();
2701         tcg_gen_movi_i64(tmp, 0);
2702         st_flag_byte(tmp, ENV_FLAG_RX_SHIFT);
2703         tcg_gen_andi_i64(tmp, vb, 1);
2704         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2705         tcg_temp_free(tmp);
2706         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2707         /* Allow interrupts to be recognized right away.  */
2708         ret = DISAS_PC_UPDATED_NOCHAIN;
2709         break;
2710 #else
2711         goto invalid_opc;
2712 #endif
2713 
2714     case 0x1F:
2715         /* HW_ST (PALcode) */
2716 #ifndef CONFIG_USER_ONLY
2717         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2718         {
2719             switch ((insn >> 12) & 0xF) {
2720             case 0x0:
2721                 /* Longword physical access */
2722                 va = load_gpr(ctx, ra);
2723                 vb = load_gpr(ctx, rb);
2724                 tmp = tcg_temp_new();
2725                 tcg_gen_addi_i64(tmp, vb, disp12);
2726                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
2727                 tcg_temp_free(tmp);
2728                 break;
2729             case 0x1:
2730                 /* Quadword physical access */
2731                 va = load_gpr(ctx, ra);
2732                 vb = load_gpr(ctx, rb);
2733                 tmp = tcg_temp_new();
2734                 tcg_gen_addi_i64(tmp, vb, disp12);
2735                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
2736                 tcg_temp_free(tmp);
2737                 break;
2738             case 0x2:
2739                 /* Longword physical access with lock */
2740                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2741                                             MMU_PHYS_IDX, MO_LESL);
2742                 break;
2743             case 0x3:
2744                 /* Quadword physical access with lock */
2745                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2746                                             MMU_PHYS_IDX, MO_LEQ);
2747                 break;
2748             case 0x4:
2749                 /* Longword virtual access */
2750                 goto invalid_opc;
2751             case 0x5:
2752                 /* Quadword virtual access */
2753                 goto invalid_opc;
2754             case 0x6:
2755                 /* Invalid */
2756                 goto invalid_opc;
2757             case 0x7:
2758                 /* Invalid */
2759                 goto invalid_opc;
2760             case 0x8:
2761                 /* Invalid */
2762                 goto invalid_opc;
2763             case 0x9:
2764                 /* Invalid */
2765                 goto invalid_opc;
2766             case 0xA:
2767                 /* Invalid */
2768                 goto invalid_opc;
2769             case 0xB:
2770                 /* Invalid */
2771                 goto invalid_opc;
2772             case 0xC:
2773                 /* Longword virtual access with alternate access mode */
2774                 goto invalid_opc;
2775             case 0xD:
2776                 /* Quadword virtual access with alternate access mode */
2777                 goto invalid_opc;
2778             case 0xE:
2779                 /* Invalid */
2780                 goto invalid_opc;
2781             case 0xF:
2782                 /* Invalid */
2783                 goto invalid_opc;
2784             }
2785             break;
2786         }
2787 #else
2788         goto invalid_opc;
2789 #endif
2790     case 0x20:
2791         /* LDF */
2792         gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
2793         break;
2794     case 0x21:
2795         /* LDG */
2796         gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
2797         break;
2798     case 0x22:
2799         /* LDS */
2800         gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
2801         break;
2802     case 0x23:
2803         /* LDT */
2804         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
2805         break;
2806     case 0x24:
2807         /* STF */
2808         gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
2809         break;
2810     case 0x25:
2811         /* STG */
2812         gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
2813         break;
2814     case 0x26:
2815         /* STS */
2816         gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
2817         break;
2818     case 0x27:
2819         /* STT */
2820         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
2821         break;
2822     case 0x28:
2823         /* LDL */
2824         gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
2825         break;
2826     case 0x29:
2827         /* LDQ */
2828         gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
2829         break;
2830     case 0x2A:
2831         /* LDL_L */
2832         gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
2833         break;
2834     case 0x2B:
2835         /* LDQ_L */
2836         gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
2837         break;
2838     case 0x2C:
2839         /* STL */
2840         gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
2841         break;
2842     case 0x2D:
2843         /* STQ */
2844         gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
2845         break;
2846     case 0x2E:
2847         /* STL_C */
2848         ret = gen_store_conditional(ctx, ra, rb, disp16,
2849                                     ctx->mem_idx, MO_LESL);
2850         break;
2851     case 0x2F:
2852         /* STQ_C */
2853         ret = gen_store_conditional(ctx, ra, rb, disp16,
2854                                     ctx->mem_idx, MO_LEQ);
2855         break;
2856     case 0x30:
2857         /* BR */
2858         ret = gen_bdirect(ctx, ra, disp21);
2859         break;
2860     case 0x31: /* FBEQ */
2861         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2862         break;
2863     case 0x32: /* FBLT */
2864         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2865         break;
2866     case 0x33: /* FBLE */
2867         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2868         break;
2869     case 0x34:
2870         /* BSR */
2871         ret = gen_bdirect(ctx, ra, disp21);
2872         break;
2873     case 0x35: /* FBNE */
2874         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2875         break;
2876     case 0x36: /* FBGE */
2877         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2878         break;
2879     case 0x37: /* FBGT */
2880         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2881         break;
2882     case 0x38:
2883         /* BLBC */
2884         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
2885         break;
2886     case 0x39:
2887         /* BEQ */
2888         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
2889         break;
2890     case 0x3A:
2891         /* BLT */
2892         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
2893         break;
2894     case 0x3B:
2895         /* BLE */
2896         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
2897         break;
2898     case 0x3C:
2899         /* BLBS */
2900         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
2901         break;
2902     case 0x3D:
2903         /* BNE */
2904         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
2905         break;
2906     case 0x3E:
2907         /* BGE */
2908         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
2909         break;
2910     case 0x3F:
2911         /* BGT */
2912         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
2913         break;
2914     invalid_opc:
2915         ret = gen_invalid(ctx);
2916         break;
2917     }
2918 
2919     return ret;
2920 }
2921 
2922 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2923 {
2924     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2925     CPUAlphaState *env = cpu->env_ptr;
2926     int64_t bound, mask;
2927 
2928     ctx->tbflags = ctx->base.tb->flags;
2929     ctx->mem_idx = cpu_mmu_index(env, false);
2930     ctx->implver = env->implver;
2931     ctx->amask = env->amask;
2932 
2933 #ifdef CONFIG_USER_ONLY
2934     ctx->ir = cpu_std_ir;
2935 #else
2936     ctx->palbr = env->palbr;
2937     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2938 #endif
2939 
2940     /* ??? Every TB begins with unset rounding mode, to be initialized on
2941        the first fp insn of the TB.  Alternately we could define a proper
2942        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2943        to reset the FP_STATUS to that default at the end of any TB that
2944        changes the default.  We could even (gasp) dynamiclly figure out
2945        what default would be most efficient given the running program.  */
2946     ctx->tb_rm = -1;
2947     /* Similarly for flush-to-zero.  */
2948     ctx->tb_ftz = -1;
2949 
2950     ctx->zero = NULL;
2951     ctx->sink = NULL;
2952     ctx->lit = NULL;
2953 
2954     /* Bound the number of insns to execute to those left on the page.  */
2955     if (in_superpage(ctx, ctx->base.pc_first)) {
2956         mask = -1ULL << 41;
2957     } else {
2958         mask = TARGET_PAGE_MASK;
2959     }
2960     bound = -(ctx->base.pc_first | mask) / 4;
2961     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2962 }
2963 
/* TranslatorOps hook called once before translating a TB; Alpha needs
   no per-TB prologue, so this is intentionally empty.  */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2967 
/* TranslatorOps hook called before each insn: record its pc as insn-start
   data; restore_state_to_opc() below reads it back as data[0].  */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2972 
/* TranslatorOps hook for a guest breakpoint at the current pc: emit a
   debug exception and tell the translator loop to stop this TB.  */
static bool alpha_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                      const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    ctx->base.is_jmp = gen_excp(ctx, EXCP_DEBUG, 0);

    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing.  */
    ctx->base.pc_next += 4;
    return true;
}
2987 
/* TranslatorOps hook: fetch, decode and translate one 4-byte insn.  */
static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = cpu_ldl_code(env, ctx->base.pc_next);

    /* Advance before decoding, so that during translation pc_next
       refers to the following instruction.  */
    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    /* Release the per-insn temporaries (ctx->zero/sink/lit) and check
       that no TCG temporaries leaked from this insn.  */
    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
3000 
/*
 * TranslatorOps hook: emit the code that ends the TB, selected by how
 * translation stopped (ctx->base.is_jmp).  Note the deliberate
 * fallthrough chain: each case adds the work its exit style needs and
 * then shares the tail of the following case.
 */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn itself already ended the TB; nothing more to emit.  */
        break;
    case DISAS_TOO_MANY:
        /* Stopped only because the insn budget ran out; if allowed,
           chain directly to the TB at the next sequential pc.  */
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        /* cpu_pc has not been updated yet; store the next pc.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* cpu_pc is current; chain via the TB lookup helper if we are
           not required to return to the main loop.  */
        if (!use_exit_tb(ctx)) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        /* Return to the main loop; when single-stepping, raise a debug
           exception so the debugger regains control first.  */
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
3035 
/* TranslatorOps hook: write the guest disassembly of the translated TB
   to the qemu log, prefixed by the symbol at its start address.  */
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}
3041 
/* Hook table wiring the Alpha translator into the generic translator loop.  */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .breakpoint_check   = alpha_tr_breakpoint_check,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
3051 
/* Translate the guest code for one TranslationBlock by running the
   generic translator loop with the Alpha hook table.  */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb);
}
3057 
/* Rebuild CPU state after an exception inside a TB: data[] holds the
   values recorded by tcg_gen_insn_start() in alpha_tr_insn_start(),
   so data[0] is the pc of the faulting insn.  */
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
3063