xref: /openbmc/qemu/target/alpha/translate.c (revision 64c6e744)
1 /*
2  *  Alpha emulation cpu translation for qemu.
3  *
4  *  Copyright (c) 2007 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/cpus.h"
23 #include "disas/disas.h"
24 #include "qemu/host-utils.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "exec/log.h"
31 
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
34 #undef  HELPER_H
35 
36 #undef ALPHA_DEBUG_DISAS
37 #define CONFIG_SOFTFLOAT_INLINE
38 
39 #ifdef ALPHA_DEBUG_DISAS
40 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 #else
42 #  define LOG_DISAS(...) do { } while (0)
43 #endif
44 
45 typedef struct DisasContext DisasContext;
46 struct DisasContext {
47     DisasContextBase base;
48 
49 #ifdef CONFIG_USER_ONLY
50     MemOp unalign;
51 #else
52     uint64_t palbr;
53 #endif
54     uint32_t tbflags;
55     int mem_idx;
56 
57     /* implver and amask values for this CPU.  */
58     int implver;
59     int amask;
60 
61     /* Current rounding mode for this TB.  */
62     int tb_rm;
63     /* Current flush-to-zero setting for this TB.  */
64     int tb_ftz;
65 
66     /* The set of registers active in the current context.  */
67     TCGv *ir;
68 
69     /* Temporaries for $31 and $f31 as source and destination.  */
70     TCGv zero;
71     TCGv sink;
72 };
73 
74 #ifdef CONFIG_USER_ONLY
75 #define UNALIGN(C)  (C)->unalign
76 #else
77 #define UNALIGN(C)  MO_ALIGN
78 #endif
79 
80 /* Target-specific return values from translate_one, indicating the
81    state of the TB.  Note that DISAS_NEXT indicates that we are not
82    exiting the TB.  */
83 #define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
84 #define DISAS_PC_UPDATED          DISAS_TARGET_1
85 #define DISAS_PC_STALE            DISAS_TARGET_2
86 
87 /* global register indexes */
88 static TCGv cpu_std_ir[31];
89 static TCGv cpu_fir[31];
90 static TCGv cpu_pc;
91 static TCGv cpu_lock_addr;
92 static TCGv cpu_lock_value;
93 
94 #ifndef CONFIG_USER_ONLY
95 static TCGv cpu_pal_ir[31];
96 #endif
97 
/*
 * One-time initialization of the TCG globals that mirror CPUAlphaState:
 * the 31 integer registers, the 31 FP registers, the PAL shadow registers
 * (system mode only), and pc/lock_addr/lock_value.  Called once at
 * CPU-class creation time.
 */
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    /* $31 is always zero and has no backing storage, hence only 31 slots.  */
    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    /* PALmode register file: same as the standard one, except that
       t7,s0-s5,t11 (i.e. r8-r14 and r25) are replaced by shadows.  */
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
    }
}
161 
162 static TCGv load_zero(DisasContext *ctx)
163 {
164     if (!ctx->zero) {
165         ctx->zero = tcg_constant_i64(0);
166     }
167     return ctx->zero;
168 }
169 
170 static TCGv dest_sink(DisasContext *ctx)
171 {
172     if (!ctx->sink) {
173         ctx->sink = tcg_temp_new();
174     }
175     return ctx->sink;
176 }
177 
178 static void free_context_temps(DisasContext *ctx)
179 {
180     if (ctx->sink) {
181         tcg_gen_discard_i64(ctx->sink);
182         ctx->sink = NULL;
183     }
184 }
185 
186 static TCGv load_gpr(DisasContext *ctx, unsigned reg)
187 {
188     if (likely(reg < 31)) {
189         return ctx->ir[reg];
190     } else {
191         return load_zero(ctx);
192     }
193 }
194 
195 static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
196                          uint8_t lit, bool islit)
197 {
198     if (islit) {
199         return tcg_constant_i64(lit);
200     } else if (likely(reg < 31)) {
201         return ctx->ir[reg];
202     } else {
203         return load_zero(ctx);
204     }
205 }
206 
207 static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
208 {
209     if (likely(reg < 31)) {
210         return ctx->ir[reg];
211     } else {
212         return dest_sink(ctx);
213     }
214 }
215 
216 static TCGv load_fpr(DisasContext *ctx, unsigned reg)
217 {
218     if (likely(reg < 31)) {
219         return cpu_fir[reg];
220     } else {
221         return load_zero(ctx);
222     }
223 }
224 
225 static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
226 {
227     if (likely(reg < 31)) {
228         return cpu_fir[reg];
229     } else {
230         return dest_sink(ctx);
231     }
232 }
233 
/* Return the byte offset within CPUAlphaState of the flag byte that
   holds bit SHIFT of env->flags, accounting for host endianness.  */
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    /* On big-endian hosts the least-significant byte is at the
       highest address within the 32-bit flags word.  */
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
244 
/* Load (zero-extended) the flag byte containing bit SHIFT into VAL.  */
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}
249 
/* Store the low byte of VAL into the flag byte containing bit SHIFT.  */
static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}
254 
255 static void gen_excp_1(int exception, int error_code)
256 {
257     TCGv_i32 tmp1, tmp2;
258 
259     tmp1 = tcg_constant_i32(exception);
260     tmp2 = tcg_constant_i32(error_code);
261     gen_helper_excp(tcg_env, tmp1, tmp2);
262 }
263 
/* Raise EXCEPTION at the current insn: synchronize cpu_pc with the
   translation-time PC, then call the exception helper.  Always ends
   the TB (the helper does not return).  */
static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}
270 
/* Raise an OPCDEC (illegal instruction) exception.  */
static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
275 
276 static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
277 {
278     TCGv_i32 tmp32 = tcg_temp_new_i32();
279     tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
280     gen_helper_memory_to_f(dest, tmp32);
281 }
282 
283 static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
284 {
285     TCGv tmp = tcg_temp_new();
286     tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
287     gen_helper_memory_to_g(dest, tmp);
288 }
289 
290 static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
291 {
292     TCGv_i32 tmp32 = tcg_temp_new_i32();
293     tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
294     gen_helper_memory_to_s(dest, tmp32);
295 }
296 
/* LDT: load a 64-bit IEEE double; memory and register formats match,
   so no conversion helper is needed.  */
static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
301 
/* Common code for FP loads: compute the effective address Rb + disp16
   and invoke FUNC (one of gen_ldf/ldg/lds/ldt) on it.  */
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops. */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}
312 
/* Common code for integer loads: EA = Rb + disp16, optionally cleared
   to quadword alignment (LDQ_U), optionally recording the lock state
   for LDL_L/LDQ_L.  OP gives the size/sign of the access.  */
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        /* LDQ_U ignores the low three address bits.  */
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        /* Locked loads are always aligned; others follow UNALIGN.  */
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        /* Record address and value for the matching STx_C.  */
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
341 
342 static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
343 {
344     TCGv_i32 tmp32 = tcg_temp_new_i32();
345     gen_helper_f_to_memory(tmp32, addr);
346     tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
347 }
348 
349 static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
350 {
351     TCGv tmp = tcg_temp_new();
352     gen_helper_g_to_memory(tmp, src);
353     tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
354 }
355 
356 static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
357 {
358     TCGv_i32 tmp32 = tcg_temp_new_i32();
359     gen_helper_s_to_memory(tmp32, src);
360     tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
361 }
362 
/* STT: store a 64-bit IEEE double; register and memory formats match.  */
static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
367 
368 static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
369                          void (*func)(DisasContext *, TCGv, TCGv))
370 {
371     TCGv addr = tcg_temp_new();
372     tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
373     func(ctx, load_fpr(ctx, ra), addr);
374 }
375 
376 static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
377                           MemOp op, bool clear)
378 {
379     TCGv addr, src;
380 
381     addr = tcg_temp_new();
382     tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
383     if (clear) {
384         tcg_gen_andi_i64(addr, addr, ~0x7);
385     } else {
386         op |= UNALIGN(ctx);
387     }
388 
389     src = load_gpr(ctx, ra);
390     tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
391 }
392 
/* STL_C/STQ_C: store-conditional.  Succeeds only if the effective
   address matches cpu_lock_addr and the memory still holds
   cpu_lock_value (checked via an atomic compare-and-swap).  RA
   receives 1 on success, 0 on failure; the lock is always cleared.  */
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    /* Fail immediately if the address does not match the lock.  */
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    /* CAS old lock_value -> new RA value; VAL receives the old memory
       contents, equal to lock_value iff the store took place.  */
    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    /* Invalidate the lock on both paths.  */
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
427 
/* True if a direct (chained) goto_tb to DEST is permissible.  */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
432 
433 static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
434 {
435     uint64_t dest = ctx->base.pc_next + (disp << 2);
436 
437     if (ra != 31) {
438         tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
439     }
440 
441     /* Notice branch-to-next; used to initialize RA with the PC.  */
442     if (disp == 0) {
443         return 0;
444     } else if (use_goto_tb(ctx, dest)) {
445         tcg_gen_goto_tb(0);
446         tcg_gen_movi_i64(cpu_pc, dest);
447         tcg_gen_exit_tb(ctx->base.tb, 0);
448         return DISAS_NORETURN;
449     } else {
450         tcg_gen_movi_i64(cpu_pc, dest);
451         return DISAS_PC_UPDATED;
452     }
453 }
454 
/* Conditional branch to PC + 4 + disp*4 taken when CMP <COND> IMM.
   When both edges can be chained, emit a two-way goto_tb; otherwise
   select the new PC with a movcond.  */
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, uint64_t imm, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);

        /* Not-taken edge: fall through to the next insn.  */
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        /* Taken edge.  */
        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 i = tcg_constant_i64(imm);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        /* cpu_pc = (cmp COND imm) ? dest : next_pc.  */
        tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
        return DISAS_PC_UPDATED;
    }
}
483 
/* Integer conditional branch: compare RA against the immediate that
   is natural for COND (see is_tst_cond) and branch on the result.  */
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp)
{
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
                              is_tst_cond(cond), disp);
}
490 
/* Fold -0.0 for comparison with COND.  Alpha FP branches/cmoves compare
   the raw register bits against zero, so IEEE -0.0 (sign bit only set)
   must compare equal to +0.0.  Returns the (possibly adjusted) source
   and updates *PCOND/*PIMM to the condition and immediate to use.  */

static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
    TCGv_i64 tmp;

    *pimm = 0;
    switch (*pcond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        return src;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can compare without the sign bit. */
        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
        *pimm = INT64_MAX;
        return src;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0. */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
                            src, tcg_constant_i64(INT64_MIN),
                            tcg_constant_i64(0), src);
        return tmp;

    default:
        g_assert_not_reached();
    }
}
524 
/* FP conditional branch: fold -0.0 handling into COND, then branch.  */
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}
532 
/* FCMOVxx: $fRC = ($fRA <COND> 0) ? $fRB : $fRC, with -0.0 folding.  */
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
                        tmp, tcg_constant_i64(imm),
                        load_fpr(ctx, rb), load_fpr(ctx, rc));
}
541 
542 #define QUAL_RM_N       0x080   /* Round mode nearest even */
543 #define QUAL_RM_C       0x000   /* Round mode chopped */
544 #define QUAL_RM_M       0x040   /* Round mode minus infinity */
545 #define QUAL_RM_D       0x0c0   /* Round mode dynamic */
546 #define QUAL_RM_MASK    0x0c0
547 
548 #define QUAL_U          0x100   /* Underflow enable (fp output) */
549 #define QUAL_V          0x100   /* Overflow enable (int output) */
550 #define QUAL_S          0x400   /* Software completion enable */
551 #define QUAL_I          0x200   /* Inexact detection enable */
552 
/* Set the softfloat rounding mode per the /RM qualifier bits of FN11,
   caching the last value written so repeated insns in a TB with the
   same qualifier emit no code.  */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    /* fn11 is masked to two bits, so the four cases are exhaustive.  */
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        /* Dynamic rounding: read the mode from the FPCR at runtime.  */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
590 
/* Set the softfloat flush-to-zero flag per the /U qualifier of FN11,
   caching the last value written (see gen_qual_roundmode).  */
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
618 
/* Return FP register REG as an IEEE input operand, emitting the helper
   call that raises the appropriate traps for invalid inputs (NaNs,
   denormals) according to the /S qualifier in FN11.  IS_CMP selects the
   laxer checking used by comparisons.  */
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(tcg_env, val);
            } else {
                gen_helper_ieee_input(tcg_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(tcg_env, val);
#endif
        }
    }
    return val;
}
644 
/* After an IEEE operation writing $fRC, raise any accumulated FP
   exceptions not masked out by the qualifiers in FN11.  */
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(tcg_env, ign, reg);
    }
}
676 
/* CVTLQ: convert the longword stored in FP register format (bits
   <63:62,58:29> of VB) to a sign-extended 64-bit integer in VC.  */
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
687 
/* Common code for two-operand IEEE operations ($fRC = op($fRB)):
   apply rounding/flush-to-zero qualifiers, validate the input, call
   HELPER, and raise any enabled exceptions.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), tcg_env, vb);

    gen_fp_exc_raise(rc, fn11);
}
702 
/* Expand gen_sqrts, gen_sqrtt, gen_cvtst, gen_cvtts as thin wrappers
   around gen_ieee_arith2 with the matching helper.  */
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
713 
/* CVTTQ: convert IEEE double to 64-bit integer, honoring the rounding
   qualifier.  Flush-to-zero is irrelevant for an integer result.  */
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, tcg_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, tcg_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
732 
733 static void gen_ieee_intcvt(DisasContext *ctx,
734                             void (*helper)(TCGv, TCGv_ptr, TCGv),
735                             int rb, int rc, int fn11)
736 {
737     TCGv vb, vc;
738 
739     gen_qual_roundmode(ctx, fn11);
740     vb = load_fpr(ctx, rb);
741     vc = dest_fpr(ctx, rc);
742 
743     /* The only exception that can be raised by integer conversion
744        is inexact.  Thus we only need to worry about exceptions when
745        inexact handling is requested.  */
746     if (fn11 & QUAL_I) {
747         helper(vc, tcg_env, vb);
748         gen_fp_exc_raise(rc, fn11);
749     } else {
750         helper(vc, tcg_env, vb);
751     }
752 }
753 
/* Expand gen_cvtqs and gen_cvtqt as wrappers around gen_ieee_intcvt.  */
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
762 
763 static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
764 {
765     TCGv vmask = tcg_constant_i64(mask);
766     TCGv tmp = tcg_temp_new_i64();
767 
768     if (inv_a) {
769         tcg_gen_andc_i64(tmp, vmask, va);
770     } else {
771         tcg_gen_and_i64(tmp, va, vmask);
772     }
773 
774     tcg_gen_andc_i64(vc, vb, vmask);
775     tcg_gen_or_i64(vc, vc, tmp);
776 }
777 
/* Common code for three-operand IEEE arithmetic ($fRC = $fRA op $fRB):
   apply rounding/flush-to-zero qualifiers, validate both inputs, call
   HELPER, and raise any enabled exceptions.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
794 
/* Expand gen_adds ... gen_divt as wrappers around gen_ieee_arith3.  */
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
809 
/* Common code for IEEE comparisons: like gen_ieee_arith3, but uses the
   comparison input checks and needs no rounding/flush qualifiers.  */
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}
823 
/* Expand gen_cmptun/cmpteq/cmptlt/cmptle as comparison wrappers.  */
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
834 
/* Expand the 8-bit byte-select LIT into a 64-bit mask: each set bit i
   of LIT becomes a full byte of ones at byte position i.  */
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;

    for (int byte = 0; byte < 8; byte++) {
        if (lit & (1u << byte)) {
            mask |= (uint64_t)0xff << (byte * 8);
        }
    }
    return mask;
}
847 
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    /* Special-case the masks that correspond to simple extensions.  */
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
874 
/* EXTWH, EXTLH, EXTQH: extract the high part of a datum spanning two
   quadwords, shifting VA left by (64 - 8*<shift amount>) bits and then
   zapping to BYTE_MASK.  */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        /* Shift count is (64 - Rb*8) mod 64, computed as -(Rb*8) & 63.  */
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
896 
/* EXTBL, EXTWL, EXTLL, EXTQL: extract the low part, shifting VA right
   by 8*<shift amount> bits and zapping to BYTE_MASK.  */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            /* Clamp so the extract stays within the 64-bit operand;
               the zapnot is then subsumed by the extract itself.  */
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
916 
/* INSWH, INSLH, INSQH: insert the high part, producing the bytes of
   the zapped value that would land in the upper quadword.  */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            /* A shift amount of 0 leaves the high part empty.  */
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
952 
/* INSBL, INSWL, INSLL, INSQL: insert the low part, shifting the zapped
   value left by 8*<shift amount> bits.  */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            /* Clamp the deposit so it stays within 64 bits.  */
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
979 
980 /* MSKWH, MSKLH, MSKQH */
981 static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
982                       uint8_t lit, uint8_t byte_mask)
983 {
984     if (islit) {
985         gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
986     } else {
987         TCGv shift = tcg_temp_new();
988         TCGv mask = tcg_temp_new();
989 
990         /* The instruction description is as above, where the byte_mask
991            is shifted left, and then we extract bits <15:8>.  This can be
992            emulated with a right-shift on the expanded byte mask.  This
993            requires extra care because for an input <2:0> == 0 we need a
994            shift of 64 bits in order to generate a zero.  This is done by
995            splitting the shift into two parts, the variable shift - 1
996            followed by a constant 1 shift.  The code we expand below is
997            equivalent to ~(B * 8) & 63.  */
998 
999         tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
1000         tcg_gen_not_i64(shift, shift);
1001         tcg_gen_andi_i64(shift, shift, 0x3f);
1002         tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
1003         tcg_gen_shr_i64(mask, mask, shift);
1004         tcg_gen_shri_i64(mask, mask, 1);
1005 
1006         tcg_gen_andc_i64(vc, va, mask);
1007     }
1008 }
1009 
1010 /* MSKBL, MSKWL, MSKLL, MSKQL */
1011 static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
1012                       uint8_t lit, uint8_t byte_mask)
1013 {
1014     if (islit) {
1015         gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
1016     } else {
1017         TCGv shift = tcg_temp_new();
1018         TCGv mask = tcg_temp_new();
1019 
1020         tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
1021         tcg_gen_shli_i64(shift, shift, 3);
1022         tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
1023         tcg_gen_shl_i64(mask, mask, shift);
1024 
1025         tcg_gen_andc_i64(vc, va, mask);
1026     }
1027 }
1028 
/* Read-then-set of the RX flag byte: RA (unless R31) receives the
   current flag value, after which the flag is overwritten with SET.
   NOTE(review): presumably implements the RS/RC instructions -- confirm
   against the callers in translate_one.  */
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        /* Return the old flag value in the destination register.  */
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    /* Unconditionally store the new constant value.  */
    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
1037 
/* Translate CALL_PAL #PALCODE.  Trivial OSF/1 PALcode services are
   inlined here; anything else vectors into the PALcode image at the
   PAL base register (do_call_pal).  Returns the DisasJumpType telling
   the translator loop how to continue.  */
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            /* Read the cpu index straight out of the CPUState that
               encloses our CPUAlphaState.  */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            /* Halt the cpu (sets CPUState.halted) and leave via the
               HALTED exception; V0 reports zero interval.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    /* User-mode emulation: hand the call to the exception machinery.  */
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        /* Record the return address in exc_addr, with the low bit set
           if we were already in PAL mode; otherwise switch into PAL
           mode now.  */
        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

        /* Unprivileged entry points (bit 7 set) start at palbr+0x2000,
           privileged ones at palbr+0x1000; 64 bytes per entry.  */
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
1178 
#ifndef CONFIG_USER_ONLY

/* Flag or'd into cpu_pr_data() results to mark a 32-bit (longword)
   processor register; the remaining bits are the CPUAlphaState offset.  */
#define PR_LONG         0x200000
1182 
1183 static int cpu_pr_data(int pr)
1184 {
1185     switch (pr) {
1186     case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
1187     case  3: return offsetof(CPUAlphaState, trap_arg0);
1188     case  4: return offsetof(CPUAlphaState, trap_arg1);
1189     case  5: return offsetof(CPUAlphaState, trap_arg2);
1190     case  6: return offsetof(CPUAlphaState, exc_addr);
1191     case  7: return offsetof(CPUAlphaState, palbr);
1192     case  8: return offsetof(CPUAlphaState, ptbr);
1193     case  9: return offsetof(CPUAlphaState, vptptr);
1194     case 10: return offsetof(CPUAlphaState, unique);
1195     case 11: return offsetof(CPUAlphaState, sysval);
1196     case 12: return offsetof(CPUAlphaState, usp);
1197 
1198     case 40 ... 63:
1199         return offsetof(CPUAlphaState, scratch[pr - 40]);
1200 
1201     case 251:
1202         return offsetof(CPUAlphaState, alarm_expire);
1203     }
1204     return 0;
1205 }
1206 
1207 static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
1208 {
1209     void (*helper)(TCGv);
1210     int data;
1211 
1212     switch (regno) {
1213     case 32 ... 39:
1214         /* Accessing the "non-shadow" general registers.  */
1215         regno = regno == 39 ? 25 : regno - 32 + 8;
1216         tcg_gen_mov_i64(va, cpu_std_ir[regno]);
1217         break;
1218 
1219     case 250: /* WALLTIME */
1220         helper = gen_helper_get_walltime;
1221         goto do_helper;
1222     case 249: /* VMTIME */
1223         helper = gen_helper_get_vmtime;
1224     do_helper:
1225         if (translator_io_start(&ctx->base)) {
1226             helper(va);
1227             return DISAS_PC_STALE;
1228         } else {
1229             helper(va);
1230         }
1231         break;
1232 
1233     case 0: /* PS */
1234         ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
1235         break;
1236     case 1: /* FEN */
1237         ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
1238         break;
1239 
1240     default:
1241         /* The basic registers are data only, and unknown registers
1242            are read-zero, write-ignore.  */
1243         data = cpu_pr_data(regno);
1244         if (data == 0) {
1245             tcg_gen_movi_i64(va, 0);
1246         } else if (data & PR_LONG) {
1247             tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
1248         } else {
1249             tcg_gen_ld_i64(va, tcg_env, data);
1250         }
1251         break;
1252     }
1253 
1254     return DISAS_NEXT;
1255 }
1256 
/* Write VB to processor register REGNO.  Returns the DisasJumpType for
   the translator loop; several registers have translation side effects
   that force the TB to end.  */
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        /* Halt the cpu (set CPUState.halted) and exit via the HALTED
           exception.  */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        /* Timer access: if translator_io_start says so, the TB must
           end after this insn.  */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}
1330 #endif /* !USER_ONLY*/
1331 
/* Reject the literal-operand instruction form (jumps to invalid_opc).  */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require the CPU to implement AMASK feature FLAG.  */
#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

/* Require the given TB flag to be set for this translation.  */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
1352 
/* Require register field WHICH to be R31; otherwise the encoding is
   invalid (jumps to invalid_opc).  WHICH is parenthesized in the
   expansion so that expression arguments cannot misparse against the
   comparison's precedence.  */
#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if ((WHICH) != 31) {                    \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
1359 
/* Require floating-point to be enabled; otherwise take the FEN fault
   path (jumps to raise_fen).  */
#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
1366 
1367 static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
1368 {
1369     int32_t disp21, disp16, disp12 __attribute__((unused));
1370     uint16_t fn11;
1371     uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
1372     bool islit, real_islit;
1373     TCGv va, vb, vc, tmp, tmp2;
1374     TCGv_i32 t32;
1375     DisasJumpType ret;
1376 
1377     /* Decode all instruction fields */
1378     opc = extract32(insn, 26, 6);
1379     ra = extract32(insn, 21, 5);
1380     rb = extract32(insn, 16, 5);
1381     rc = extract32(insn, 0, 5);
1382     real_islit = islit = extract32(insn, 12, 1);
1383     lit = extract32(insn, 13, 8);
1384 
1385     disp21 = sextract32(insn, 0, 21);
1386     disp16 = sextract32(insn, 0, 16);
1387     disp12 = sextract32(insn, 0, 12);
1388 
1389     fn11 = extract32(insn, 5, 11);
1390     fpfn = extract32(insn, 5, 6);
1391     fn7 = extract32(insn, 5, 7);
1392 
1393     if (rb == 31 && !islit) {
1394         islit = true;
1395         lit = 0;
1396     }
1397 
1398     ret = DISAS_NEXT;
1399     switch (opc) {
1400     case 0x00:
1401         /* CALL_PAL */
1402         ret = gen_call_pal(ctx, insn & 0x03ffffff);
1403         break;
1404     case 0x01:
1405         /* OPC01 */
1406         goto invalid_opc;
1407     case 0x02:
1408         /* OPC02 */
1409         goto invalid_opc;
1410     case 0x03:
1411         /* OPC03 */
1412         goto invalid_opc;
1413     case 0x04:
1414         /* OPC04 */
1415         goto invalid_opc;
1416     case 0x05:
1417         /* OPC05 */
1418         goto invalid_opc;
1419     case 0x06:
1420         /* OPC06 */
1421         goto invalid_opc;
1422     case 0x07:
1423         /* OPC07 */
1424         goto invalid_opc;
1425 
1426     case 0x09:
1427         /* LDAH */
1428         disp16 = (uint32_t)disp16 << 16;
1429         /* fall through */
1430     case 0x08:
1431         /* LDA */
1432         va = dest_gpr(ctx, ra);
1433         /* It's worth special-casing immediate loads.  */
1434         if (rb == 31) {
1435             tcg_gen_movi_i64(va, disp16);
1436         } else {
1437             tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
1438         }
1439         break;
1440 
1441     case 0x0A:
1442         /* LDBU */
1443         REQUIRE_AMASK(BWX);
1444         gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
1445         break;
1446     case 0x0B:
1447         /* LDQ_U */
1448         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
1449         break;
1450     case 0x0C:
1451         /* LDWU */
1452         REQUIRE_AMASK(BWX);
1453         gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
1454         break;
1455     case 0x0D:
1456         /* STW */
1457         REQUIRE_AMASK(BWX);
1458         gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
1459         break;
1460     case 0x0E:
1461         /* STB */
1462         REQUIRE_AMASK(BWX);
1463         gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
1464         break;
1465     case 0x0F:
1466         /* STQ_U */
1467         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
1468         break;
1469 
1470     case 0x10:
1471         vc = dest_gpr(ctx, rc);
1472         vb = load_gpr_lit(ctx, rb, lit, islit);
1473 
1474         if (ra == 31) {
1475             if (fn7 == 0x00) {
1476                 /* Special case ADDL as SEXTL.  */
1477                 tcg_gen_ext32s_i64(vc, vb);
1478                 break;
1479             }
1480             if (fn7 == 0x29) {
1481                 /* Special case SUBQ as NEGQ.  */
1482                 tcg_gen_neg_i64(vc, vb);
1483                 break;
1484             }
1485         }
1486 
1487         va = load_gpr(ctx, ra);
1488         switch (fn7) {
1489         case 0x00:
1490             /* ADDL */
1491             tcg_gen_add_i64(vc, va, vb);
1492             tcg_gen_ext32s_i64(vc, vc);
1493             break;
1494         case 0x02:
1495             /* S4ADDL */
1496             tmp = tcg_temp_new();
1497             tcg_gen_shli_i64(tmp, va, 2);
1498             tcg_gen_add_i64(tmp, tmp, vb);
1499             tcg_gen_ext32s_i64(vc, tmp);
1500             break;
1501         case 0x09:
1502             /* SUBL */
1503             tcg_gen_sub_i64(vc, va, vb);
1504             tcg_gen_ext32s_i64(vc, vc);
1505             break;
1506         case 0x0B:
1507             /* S4SUBL */
1508             tmp = tcg_temp_new();
1509             tcg_gen_shli_i64(tmp, va, 2);
1510             tcg_gen_sub_i64(tmp, tmp, vb);
1511             tcg_gen_ext32s_i64(vc, tmp);
1512             break;
1513         case 0x0F:
1514             /* CMPBGE */
1515             if (ra == 31) {
1516                 /* Special case 0 >= X as X == 0.  */
1517                 gen_helper_cmpbe0(vc, vb);
1518             } else {
1519                 gen_helper_cmpbge(vc, va, vb);
1520             }
1521             break;
1522         case 0x12:
1523             /* S8ADDL */
1524             tmp = tcg_temp_new();
1525             tcg_gen_shli_i64(tmp, va, 3);
1526             tcg_gen_add_i64(tmp, tmp, vb);
1527             tcg_gen_ext32s_i64(vc, tmp);
1528             break;
1529         case 0x1B:
1530             /* S8SUBL */
1531             tmp = tcg_temp_new();
1532             tcg_gen_shli_i64(tmp, va, 3);
1533             tcg_gen_sub_i64(tmp, tmp, vb);
1534             tcg_gen_ext32s_i64(vc, tmp);
1535             break;
1536         case 0x1D:
1537             /* CMPULT */
1538             tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
1539             break;
1540         case 0x20:
1541             /* ADDQ */
1542             tcg_gen_add_i64(vc, va, vb);
1543             break;
1544         case 0x22:
1545             /* S4ADDQ */
1546             tmp = tcg_temp_new();
1547             tcg_gen_shli_i64(tmp, va, 2);
1548             tcg_gen_add_i64(vc, tmp, vb);
1549             break;
1550         case 0x29:
1551             /* SUBQ */
1552             tcg_gen_sub_i64(vc, va, vb);
1553             break;
1554         case 0x2B:
1555             /* S4SUBQ */
1556             tmp = tcg_temp_new();
1557             tcg_gen_shli_i64(tmp, va, 2);
1558             tcg_gen_sub_i64(vc, tmp, vb);
1559             break;
1560         case 0x2D:
1561             /* CMPEQ */
1562             tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
1563             break;
1564         case 0x32:
1565             /* S8ADDQ */
1566             tmp = tcg_temp_new();
1567             tcg_gen_shli_i64(tmp, va, 3);
1568             tcg_gen_add_i64(vc, tmp, vb);
1569             break;
1570         case 0x3B:
1571             /* S8SUBQ */
1572             tmp = tcg_temp_new();
1573             tcg_gen_shli_i64(tmp, va, 3);
1574             tcg_gen_sub_i64(vc, tmp, vb);
1575             break;
1576         case 0x3D:
1577             /* CMPULE */
1578             tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
1579             break;
1580         case 0x40:
1581             /* ADDL/V */
1582             tmp = tcg_temp_new();
1583             tcg_gen_ext32s_i64(tmp, va);
1584             tcg_gen_ext32s_i64(vc, vb);
1585             tcg_gen_add_i64(tmp, tmp, vc);
1586             tcg_gen_ext32s_i64(vc, tmp);
1587             gen_helper_check_overflow(tcg_env, vc, tmp);
1588             break;
1589         case 0x49:
1590             /* SUBL/V */
1591             tmp = tcg_temp_new();
1592             tcg_gen_ext32s_i64(tmp, va);
1593             tcg_gen_ext32s_i64(vc, vb);
1594             tcg_gen_sub_i64(tmp, tmp, vc);
1595             tcg_gen_ext32s_i64(vc, tmp);
1596             gen_helper_check_overflow(tcg_env, vc, tmp);
1597             break;
1598         case 0x4D:
1599             /* CMPLT */
1600             tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
1601             break;
1602         case 0x60:
1603             /* ADDQ/V */
1604             tmp = tcg_temp_new();
1605             tmp2 = tcg_temp_new();
1606             tcg_gen_eqv_i64(tmp, va, vb);
1607             tcg_gen_mov_i64(tmp2, va);
1608             tcg_gen_add_i64(vc, va, vb);
1609             tcg_gen_xor_i64(tmp2, tmp2, vc);
1610             tcg_gen_and_i64(tmp, tmp, tmp2);
1611             tcg_gen_shri_i64(tmp, tmp, 63);
1612             tcg_gen_movi_i64(tmp2, 0);
1613             gen_helper_check_overflow(tcg_env, tmp, tmp2);
1614             break;
1615         case 0x69:
1616             /* SUBQ/V */
1617             tmp = tcg_temp_new();
1618             tmp2 = tcg_temp_new();
1619             tcg_gen_xor_i64(tmp, va, vb);
1620             tcg_gen_mov_i64(tmp2, va);
1621             tcg_gen_sub_i64(vc, va, vb);
1622             tcg_gen_xor_i64(tmp2, tmp2, vc);
1623             tcg_gen_and_i64(tmp, tmp, tmp2);
1624             tcg_gen_shri_i64(tmp, tmp, 63);
1625             tcg_gen_movi_i64(tmp2, 0);
1626             gen_helper_check_overflow(tcg_env, tmp, tmp2);
1627             break;
1628         case 0x6D:
1629             /* CMPLE */
1630             tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
1631             break;
1632         default:
1633             goto invalid_opc;
1634         }
1635         break;
1636 
1637     case 0x11:
1638         if (fn7 == 0x20) {
1639             if (rc == 31) {
1640                 /* Special case BIS as NOP.  */
1641                 break;
1642             }
1643             if (ra == 31) {
1644                 /* Special case BIS as MOV.  */
1645                 vc = dest_gpr(ctx, rc);
1646                 if (islit) {
1647                     tcg_gen_movi_i64(vc, lit);
1648                 } else {
1649                     tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
1650                 }
1651                 break;
1652             }
1653         }
1654 
1655         vc = dest_gpr(ctx, rc);
1656         vb = load_gpr_lit(ctx, rb, lit, islit);
1657 
1658         if (fn7 == 0x28 && ra == 31) {
1659             /* Special case ORNOT as NOT.  */
1660             tcg_gen_not_i64(vc, vb);
1661             break;
1662         }
1663 
1664         va = load_gpr(ctx, ra);
1665         switch (fn7) {
1666         case 0x00:
1667             /* AND */
1668             tcg_gen_and_i64(vc, va, vb);
1669             break;
1670         case 0x08:
1671             /* BIC */
1672             tcg_gen_andc_i64(vc, va, vb);
1673             break;
1674         case 0x14:
1675             /* CMOVLBS */
1676             tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
1677                                 vb, load_gpr(ctx, rc));
1678             break;
1679         case 0x16:
1680             /* CMOVLBC */
1681             tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
1682                                 vb, load_gpr(ctx, rc));
1683             break;
1684         case 0x20:
1685             /* BIS */
1686             tcg_gen_or_i64(vc, va, vb);
1687             break;
1688         case 0x24:
1689             /* CMOVEQ */
1690             tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
1691                                 vb, load_gpr(ctx, rc));
1692             break;
1693         case 0x26:
1694             /* CMOVNE */
1695             tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
1696                                 vb, load_gpr(ctx, rc));
1697             break;
1698         case 0x28:
1699             /* ORNOT */
1700             tcg_gen_orc_i64(vc, va, vb);
1701             break;
1702         case 0x40:
1703             /* XOR */
1704             tcg_gen_xor_i64(vc, va, vb);
1705             break;
1706         case 0x44:
1707             /* CMOVLT */
1708             tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
1709                                 vb, load_gpr(ctx, rc));
1710             break;
1711         case 0x46:
1712             /* CMOVGE */
1713             tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
1714                                 vb, load_gpr(ctx, rc));
1715             break;
1716         case 0x48:
1717             /* EQV */
1718             tcg_gen_eqv_i64(vc, va, vb);
1719             break;
1720         case 0x61:
1721             /* AMASK */
1722             REQUIRE_REG_31(ra);
1723             tcg_gen_andi_i64(vc, vb, ~ctx->amask);
1724             break;
1725         case 0x64:
1726             /* CMOVLE */
1727             tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
1728                                 vb, load_gpr(ctx, rc));
1729             break;
1730         case 0x66:
1731             /* CMOVGT */
1732             tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
1733                                 vb, load_gpr(ctx, rc));
1734             break;
1735         case 0x6C:
1736             /* IMPLVER */
1737             REQUIRE_REG_31(ra);
1738             tcg_gen_movi_i64(vc, ctx->implver);
1739             break;
1740         default:
1741             goto invalid_opc;
1742         }
1743         break;
1744 
1745     case 0x12:
1746         vc = dest_gpr(ctx, rc);
1747         va = load_gpr(ctx, ra);
1748         switch (fn7) {
1749         case 0x02:
1750             /* MSKBL */
1751             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
1752             break;
1753         case 0x06:
1754             /* EXTBL */
1755             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
1756             break;
1757         case 0x0B:
1758             /* INSBL */
1759             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
1760             break;
1761         case 0x12:
1762             /* MSKWL */
1763             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
1764             break;
1765         case 0x16:
1766             /* EXTWL */
1767             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
1768             break;
1769         case 0x1B:
1770             /* INSWL */
1771             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
1772             break;
1773         case 0x22:
1774             /* MSKLL */
1775             gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
1776             break;
1777         case 0x26:
1778             /* EXTLL */
1779             gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
1780             break;
1781         case 0x2B:
1782             /* INSLL */
1783             gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
1784             break;
1785         case 0x30:
1786             /* ZAP */
1787             if (islit) {
1788                 gen_zapnoti(vc, va, ~lit);
1789             } else {
1790                 gen_helper_zap(vc, va, load_gpr(ctx, rb));
1791             }
1792             break;
1793         case 0x31:
1794             /* ZAPNOT */
1795             if (islit) {
1796                 gen_zapnoti(vc, va, lit);
1797             } else {
1798                 gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
1799             }
1800             break;
1801         case 0x32:
1802             /* MSKQL */
1803             gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
1804             break;
1805         case 0x34:
1806             /* SRL */
1807             if (islit) {
1808                 tcg_gen_shri_i64(vc, va, lit & 0x3f);
1809             } else {
1810                 tmp = tcg_temp_new();
1811                 vb = load_gpr(ctx, rb);
1812                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1813                 tcg_gen_shr_i64(vc, va, tmp);
1814             }
1815             break;
1816         case 0x36:
1817             /* EXTQL */
1818             gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
1819             break;
1820         case 0x39:
1821             /* SLL */
1822             if (islit) {
1823                 tcg_gen_shli_i64(vc, va, lit & 0x3f);
1824             } else {
1825                 tmp = tcg_temp_new();
1826                 vb = load_gpr(ctx, rb);
1827                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1828                 tcg_gen_shl_i64(vc, va, tmp);
1829             }
1830             break;
1831         case 0x3B:
1832             /* INSQL */
1833             gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
1834             break;
1835         case 0x3C:
1836             /* SRA */
1837             if (islit) {
1838                 tcg_gen_sari_i64(vc, va, lit & 0x3f);
1839             } else {
1840                 tmp = tcg_temp_new();
1841                 vb = load_gpr(ctx, rb);
1842                 tcg_gen_andi_i64(tmp, vb, 0x3f);
1843                 tcg_gen_sar_i64(vc, va, tmp);
1844             }
1845             break;
1846         case 0x52:
1847             /* MSKWH */
1848             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
1849             break;
1850         case 0x57:
1851             /* INSWH */
1852             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
1853             break;
1854         case 0x5A:
1855             /* EXTWH */
1856             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
1857             break;
1858         case 0x62:
1859             /* MSKLH */
1860             gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
1861             break;
1862         case 0x67:
1863             /* INSLH */
1864             gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
1865             break;
1866         case 0x6A:
1867             /* EXTLH */
1868             gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
1869             break;
1870         case 0x72:
1871             /* MSKQH */
1872             gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
1873             break;
1874         case 0x77:
1875             /* INSQH */
1876             gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
1877             break;
1878         case 0x7A:
1879             /* EXTQH */
1880             gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
1881             break;
1882         default:
1883             goto invalid_opc;
1884         }
1885         break;
1886 
1887     case 0x13:
1888         vc = dest_gpr(ctx, rc);
1889         vb = load_gpr_lit(ctx, rb, lit, islit);
1890         va = load_gpr(ctx, ra);
1891         switch (fn7) {
1892         case 0x00:
1893             /* MULL */
1894             tcg_gen_mul_i64(vc, va, vb);
1895             tcg_gen_ext32s_i64(vc, vc);
1896             break;
1897         case 0x20:
1898             /* MULQ */
1899             tcg_gen_mul_i64(vc, va, vb);
1900             break;
1901         case 0x30:
1902             /* UMULH */
1903             tmp = tcg_temp_new();
1904             tcg_gen_mulu2_i64(tmp, vc, va, vb);
1905             break;
1906         case 0x40:
1907             /* MULL/V */
1908             tmp = tcg_temp_new();
1909             tcg_gen_ext32s_i64(tmp, va);
1910             tcg_gen_ext32s_i64(vc, vb);
1911             tcg_gen_mul_i64(tmp, tmp, vc);
1912             tcg_gen_ext32s_i64(vc, tmp);
1913             gen_helper_check_overflow(tcg_env, vc, tmp);
1914             break;
1915         case 0x60:
1916             /* MULQ/V */
1917             tmp = tcg_temp_new();
1918             tmp2 = tcg_temp_new();
1919             tcg_gen_muls2_i64(vc, tmp, va, vb);
1920             tcg_gen_sari_i64(tmp2, vc, 63);
1921             gen_helper_check_overflow(tcg_env, tmp, tmp2);
1922             break;
1923         default:
1924             goto invalid_opc;
1925         }
1926         break;
1927 
1928     case 0x14:
1929         REQUIRE_AMASK(FIX);
1930         vc = dest_fpr(ctx, rc);
1931         switch (fpfn) { /* fn11 & 0x3F */
1932         case 0x04:
1933             /* ITOFS */
1934             REQUIRE_REG_31(rb);
1935             REQUIRE_FEN;
1936             t32 = tcg_temp_new_i32();
1937             va = load_gpr(ctx, ra);
1938             tcg_gen_extrl_i64_i32(t32, va);
1939             gen_helper_memory_to_s(vc, t32);
1940             break;
1941         case 0x0A:
1942             /* SQRTF */
1943             REQUIRE_REG_31(ra);
1944             REQUIRE_FEN;
1945             vb = load_fpr(ctx, rb);
1946             gen_helper_sqrtf(vc, tcg_env, vb);
1947             break;
1948         case 0x0B:
1949             /* SQRTS */
1950             REQUIRE_REG_31(ra);
1951             REQUIRE_FEN;
1952             gen_sqrts(ctx, rb, rc, fn11);
1953             break;
1954         case 0x14:
1955             /* ITOFF */
1956             REQUIRE_REG_31(rb);
1957             REQUIRE_FEN;
1958             t32 = tcg_temp_new_i32();
1959             va = load_gpr(ctx, ra);
1960             tcg_gen_extrl_i64_i32(t32, va);
1961             gen_helper_memory_to_f(vc, t32);
1962             break;
1963         case 0x24:
1964             /* ITOFT */
1965             REQUIRE_REG_31(rb);
1966             REQUIRE_FEN;
1967             va = load_gpr(ctx, ra);
1968             tcg_gen_mov_i64(vc, va);
1969             break;
1970         case 0x2A:
1971             /* SQRTG */
1972             REQUIRE_REG_31(ra);
1973             REQUIRE_FEN;
1974             vb = load_fpr(ctx, rb);
1975             gen_helper_sqrtg(vc, tcg_env, vb);
1976             break;
1977         case 0x02B:
1978             /* SQRTT */
1979             REQUIRE_REG_31(ra);
1980             REQUIRE_FEN;
1981             gen_sqrtt(ctx, rb, rc, fn11);
1982             break;
1983         default:
1984             goto invalid_opc;
1985         }
1986         break;
1987 
1988     case 0x15:
1989         /* VAX floating point */
1990         /* XXX: rounding mode and trap are ignored (!) */
1991         vc = dest_fpr(ctx, rc);
1992         vb = load_fpr(ctx, rb);
1993         va = load_fpr(ctx, ra);
1994         switch (fpfn) { /* fn11 & 0x3F */
1995         case 0x00:
1996             /* ADDF */
1997             REQUIRE_FEN;
1998             gen_helper_addf(vc, tcg_env, va, vb);
1999             break;
2000         case 0x01:
2001             /* SUBF */
2002             REQUIRE_FEN;
2003             gen_helper_subf(vc, tcg_env, va, vb);
2004             break;
2005         case 0x02:
2006             /* MULF */
2007             REQUIRE_FEN;
2008             gen_helper_mulf(vc, tcg_env, va, vb);
2009             break;
2010         case 0x03:
2011             /* DIVF */
2012             REQUIRE_FEN;
2013             gen_helper_divf(vc, tcg_env, va, vb);
2014             break;
2015         case 0x1E:
2016             /* CVTDG -- TODO */
2017             REQUIRE_REG_31(ra);
2018             goto invalid_opc;
2019         case 0x20:
2020             /* ADDG */
2021             REQUIRE_FEN;
2022             gen_helper_addg(vc, tcg_env, va, vb);
2023             break;
2024         case 0x21:
2025             /* SUBG */
2026             REQUIRE_FEN;
2027             gen_helper_subg(vc, tcg_env, va, vb);
2028             break;
2029         case 0x22:
2030             /* MULG */
2031             REQUIRE_FEN;
2032             gen_helper_mulg(vc, tcg_env, va, vb);
2033             break;
2034         case 0x23:
2035             /* DIVG */
2036             REQUIRE_FEN;
2037             gen_helper_divg(vc, tcg_env, va, vb);
2038             break;
2039         case 0x25:
2040             /* CMPGEQ */
2041             REQUIRE_FEN;
2042             gen_helper_cmpgeq(vc, tcg_env, va, vb);
2043             break;
2044         case 0x26:
2045             /* CMPGLT */
2046             REQUIRE_FEN;
2047             gen_helper_cmpglt(vc, tcg_env, va, vb);
2048             break;
2049         case 0x27:
2050             /* CMPGLE */
2051             REQUIRE_FEN;
2052             gen_helper_cmpgle(vc, tcg_env, va, vb);
2053             break;
2054         case 0x2C:
2055             /* CVTGF */
2056             REQUIRE_REG_31(ra);
2057             REQUIRE_FEN;
2058             gen_helper_cvtgf(vc, tcg_env, vb);
2059             break;
2060         case 0x2D:
2061             /* CVTGD -- TODO */
2062             REQUIRE_REG_31(ra);
2063             goto invalid_opc;
2064         case 0x2F:
2065             /* CVTGQ */
2066             REQUIRE_REG_31(ra);
2067             REQUIRE_FEN;
2068             gen_helper_cvtgq(vc, tcg_env, vb);
2069             break;
2070         case 0x3C:
2071             /* CVTQF */
2072             REQUIRE_REG_31(ra);
2073             REQUIRE_FEN;
2074             gen_helper_cvtqf(vc, tcg_env, vb);
2075             break;
2076         case 0x3E:
2077             /* CVTQG */
2078             REQUIRE_REG_31(ra);
2079             REQUIRE_FEN;
2080             gen_helper_cvtqg(vc, tcg_env, vb);
2081             break;
2082         default:
2083             goto invalid_opc;
2084         }
2085         break;
2086 
2087     case 0x16:
2088         /* IEEE floating-point */
2089         switch (fpfn) { /* fn11 & 0x3F */
2090         case 0x00:
2091             /* ADDS */
2092             REQUIRE_FEN;
2093             gen_adds(ctx, ra, rb, rc, fn11);
2094             break;
2095         case 0x01:
2096             /* SUBS */
2097             REQUIRE_FEN;
2098             gen_subs(ctx, ra, rb, rc, fn11);
2099             break;
2100         case 0x02:
2101             /* MULS */
2102             REQUIRE_FEN;
2103             gen_muls(ctx, ra, rb, rc, fn11);
2104             break;
2105         case 0x03:
2106             /* DIVS */
2107             REQUIRE_FEN;
2108             gen_divs(ctx, ra, rb, rc, fn11);
2109             break;
2110         case 0x20:
2111             /* ADDT */
2112             REQUIRE_FEN;
2113             gen_addt(ctx, ra, rb, rc, fn11);
2114             break;
2115         case 0x21:
2116             /* SUBT */
2117             REQUIRE_FEN;
2118             gen_subt(ctx, ra, rb, rc, fn11);
2119             break;
2120         case 0x22:
2121             /* MULT */
2122             REQUIRE_FEN;
2123             gen_mult(ctx, ra, rb, rc, fn11);
2124             break;
2125         case 0x23:
2126             /* DIVT */
2127             REQUIRE_FEN;
2128             gen_divt(ctx, ra, rb, rc, fn11);
2129             break;
2130         case 0x24:
2131             /* CMPTUN */
2132             REQUIRE_FEN;
2133             gen_cmptun(ctx, ra, rb, rc, fn11);
2134             break;
2135         case 0x25:
2136             /* CMPTEQ */
2137             REQUIRE_FEN;
2138             gen_cmpteq(ctx, ra, rb, rc, fn11);
2139             break;
2140         case 0x26:
2141             /* CMPTLT */
2142             REQUIRE_FEN;
2143             gen_cmptlt(ctx, ra, rb, rc, fn11);
2144             break;
2145         case 0x27:
2146             /* CMPTLE */
2147             REQUIRE_FEN;
2148             gen_cmptle(ctx, ra, rb, rc, fn11);
2149             break;
2150         case 0x2C:
2151             REQUIRE_REG_31(ra);
2152             REQUIRE_FEN;
2153             if (fn11 == 0x2AC || fn11 == 0x6AC) {
2154                 /* CVTST */
2155                 gen_cvtst(ctx, rb, rc, fn11);
2156             } else {
2157                 /* CVTTS */
2158                 gen_cvtts(ctx, rb, rc, fn11);
2159             }
2160             break;
2161         case 0x2F:
2162             /* CVTTQ */
2163             REQUIRE_REG_31(ra);
2164             REQUIRE_FEN;
2165             gen_cvttq(ctx, rb, rc, fn11);
2166             break;
2167         case 0x3C:
2168             /* CVTQS */
2169             REQUIRE_REG_31(ra);
2170             REQUIRE_FEN;
2171             gen_cvtqs(ctx, rb, rc, fn11);
2172             break;
2173         case 0x3E:
2174             /* CVTQT */
2175             REQUIRE_REG_31(ra);
2176             REQUIRE_FEN;
2177             gen_cvtqt(ctx, rb, rc, fn11);
2178             break;
2179         default:
2180             goto invalid_opc;
2181         }
2182         break;
2183 
2184     case 0x17:
2185         switch (fn11) {
2186         case 0x010:
2187             /* CVTLQ */
2188             REQUIRE_REG_31(ra);
2189             REQUIRE_FEN;
2190             vc = dest_fpr(ctx, rc);
2191             vb = load_fpr(ctx, rb);
2192             gen_cvtlq(vc, vb);
2193             break;
2194         case 0x020:
2195             /* CPYS */
2196             REQUIRE_FEN;
2197             if (rc == 31) {
2198                 /* Special case CPYS as FNOP.  */
2199             } else {
2200                 vc = dest_fpr(ctx, rc);
2201                 va = load_fpr(ctx, ra);
2202                 if (ra == rb) {
2203                     /* Special case CPYS as FMOV.  */
2204                     tcg_gen_mov_i64(vc, va);
2205                 } else {
2206                     vb = load_fpr(ctx, rb);
2207                     gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
2208                 }
2209             }
2210             break;
2211         case 0x021:
2212             /* CPYSN */
2213             REQUIRE_FEN;
2214             vc = dest_fpr(ctx, rc);
2215             vb = load_fpr(ctx, rb);
2216             va = load_fpr(ctx, ra);
2217             gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
2218             break;
2219         case 0x022:
2220             /* CPYSE */
2221             REQUIRE_FEN;
2222             vc = dest_fpr(ctx, rc);
2223             vb = load_fpr(ctx, rb);
2224             va = load_fpr(ctx, ra);
2225             gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
2226             break;
2227         case 0x024:
2228             /* MT_FPCR */
2229             REQUIRE_FEN;
2230             va = load_fpr(ctx, ra);
2231             gen_helper_store_fpcr(tcg_env, va);
2232             if (ctx->tb_rm == QUAL_RM_D) {
2233                 /* Re-do the copy of the rounding mode to fp_status
2234                    the next time we use dynamic rounding.  */
2235                 ctx->tb_rm = -1;
2236             }
2237             break;
2238         case 0x025:
2239             /* MF_FPCR */
2240             REQUIRE_FEN;
2241             va = dest_fpr(ctx, ra);
2242             gen_helper_load_fpcr(va, tcg_env);
2243             break;
2244         case 0x02A:
2245             /* FCMOVEQ */
2246             REQUIRE_FEN;
2247             gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
2248             break;
2249         case 0x02B:
2250             /* FCMOVNE */
2251             REQUIRE_FEN;
2252             gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
2253             break;
2254         case 0x02C:
2255             /* FCMOVLT */
2256             REQUIRE_FEN;
2257             gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
2258             break;
2259         case 0x02D:
2260             /* FCMOVGE */
2261             REQUIRE_FEN;
2262             gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
2263             break;
2264         case 0x02E:
2265             /* FCMOVLE */
2266             REQUIRE_FEN;
2267             gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
2268             break;
2269         case 0x02F:
2270             /* FCMOVGT */
2271             REQUIRE_FEN;
2272             gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
2273             break;
2274         case 0x030: /* CVTQL */
2275         case 0x130: /* CVTQL/V */
2276         case 0x530: /* CVTQL/SV */
2277             REQUIRE_REG_31(ra);
2278             REQUIRE_FEN;
2279             vc = dest_fpr(ctx, rc);
2280             vb = load_fpr(ctx, rb);
2281             gen_helper_cvtql(vc, tcg_env, vb);
2282             gen_fp_exc_raise(rc, fn11);
2283             break;
2284         default:
2285             goto invalid_opc;
2286         }
2287         break;
2288 
2289     case 0x18:
2290         switch ((uint16_t)disp16) {
2291         case 0x0000:
2292             /* TRAPB */
2293             /* No-op.  */
2294             break;
2295         case 0x0400:
2296             /* EXCB */
2297             /* No-op.  */
2298             break;
2299         case 0x4000:
2300             /* MB */
2301             tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
2302             break;
2303         case 0x4400:
2304             /* WMB */
2305             tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
2306             break;
2307         case 0x8000:
2308             /* FETCH */
2309             /* No-op */
2310             break;
2311         case 0xA000:
2312             /* FETCH_M */
2313             /* No-op */
2314             break;
2315         case 0xC000:
2316             /* RPCC */
2317             va = dest_gpr(ctx, ra);
2318             if (translator_io_start(&ctx->base)) {
2319                 ret = DISAS_PC_STALE;
2320             }
2321             gen_helper_load_pcc(va, tcg_env);
2322             break;
2323         case 0xE000:
2324             /* RC */
2325             gen_rx(ctx, ra, 0);
2326             break;
2327         case 0xE800:
2328             /* ECB */
2329             break;
2330         case 0xF000:
2331             /* RS */
2332             gen_rx(ctx, ra, 1);
2333             break;
2334         case 0xF800:
2335             /* WH64 */
2336             /* No-op */
2337             break;
2338         case 0xFC00:
2339             /* WH64EN */
2340             /* No-op */
2341             break;
2342         default:
2343             goto invalid_opc;
2344         }
2345         break;
2346 
2347     case 0x19:
2348         /* HW_MFPR (PALcode) */
2349 #ifndef CONFIG_USER_ONLY
2350         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2351         va = dest_gpr(ctx, ra);
2352         ret = gen_mfpr(ctx, va, insn & 0xffff);
2353         break;
2354 #else
2355         goto invalid_opc;
2356 #endif
2357 
2358     case 0x1A:
2359         /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
2360            prediction stack action, which of course we don't implement.  */
2361         vb = load_gpr(ctx, rb);
2362         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2363         if (ra != 31) {
2364             tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
2365         }
2366         ret = DISAS_PC_UPDATED;
2367         break;
2368 
2369     case 0x1B:
2370         /* HW_LD (PALcode) */
2371 #ifndef CONFIG_USER_ONLY
2372         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2373         {
2374             TCGv addr = tcg_temp_new();
2375             vb = load_gpr(ctx, rb);
2376             va = dest_gpr(ctx, ra);
2377 
2378             tcg_gen_addi_i64(addr, vb, disp12);
2379             switch ((insn >> 12) & 0xF) {
2380             case 0x0:
2381                 /* Longword physical access (hw_ldl/p) */
2382                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2383                 break;
2384             case 0x1:
2385                 /* Quadword physical access (hw_ldq/p) */
2386                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2387                 break;
2388             case 0x2:
2389                 /* Longword physical access with lock (hw_ldl_l/p) */
2390                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2391                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2392                 tcg_gen_mov_i64(cpu_lock_value, va);
2393                 break;
2394             case 0x3:
2395                 /* Quadword physical access with lock (hw_ldq_l/p) */
2396                 tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2397                 tcg_gen_mov_i64(cpu_lock_addr, addr);
2398                 tcg_gen_mov_i64(cpu_lock_value, va);
2399                 break;
2400             case 0x4:
2401                 /* Longword virtual PTE fetch (hw_ldl/v) */
2402                 goto invalid_opc;
2403             case 0x5:
2404                 /* Quadword virtual PTE fetch (hw_ldq/v) */
2405                 goto invalid_opc;
2406                 break;
2407             case 0x6:
2408                 /* Invalid */
2409                 goto invalid_opc;
2410             case 0x7:
2411                 /* Invalid */
2412                 goto invalid_opc;
2413             case 0x8:
2414                 /* Longword virtual access (hw_ldl) */
2415                 goto invalid_opc;
2416             case 0x9:
2417                 /* Quadword virtual access (hw_ldq) */
2418                 goto invalid_opc;
2419             case 0xA:
2420                 /* Longword virtual access with protection check (hw_ldl/w) */
2421                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2422                                     MO_LESL | MO_ALIGN);
2423                 break;
2424             case 0xB:
2425                 /* Quadword virtual access with protection check (hw_ldq/w) */
2426                 tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
2427                                     MO_LEUQ | MO_ALIGN);
2428                 break;
2429             case 0xC:
2430                 /* Longword virtual access with alt access mode (hw_ldl/a) */
2431                 goto invalid_opc;
2432             case 0xD:
2433                 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2434                 goto invalid_opc;
2435             case 0xE:
2436                 /* Longword virtual access with alternate access mode and
2437                    protection checks (hw_ldl/wa) */
2438                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2439                                     MO_LESL | MO_ALIGN);
2440                 break;
2441             case 0xF:
2442                 /* Quadword virtual access with alternate access mode and
2443                    protection checks (hw_ldq/wa) */
2444                 tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
2445                                     MO_LEUQ | MO_ALIGN);
2446                 break;
2447             }
2448             break;
2449         }
2450 #else
2451         goto invalid_opc;
2452 #endif
2453 
2454     case 0x1C:
2455         vc = dest_gpr(ctx, rc);
2456         if (fn7 == 0x70) {
2457             /* FTOIT */
2458             REQUIRE_AMASK(FIX);
2459             REQUIRE_REG_31(rb);
2460             va = load_fpr(ctx, ra);
2461             tcg_gen_mov_i64(vc, va);
2462             break;
2463         } else if (fn7 == 0x78) {
2464             /* FTOIS */
2465             REQUIRE_AMASK(FIX);
2466             REQUIRE_REG_31(rb);
2467             t32 = tcg_temp_new_i32();
2468             va = load_fpr(ctx, ra);
2469             gen_helper_s_to_memory(t32, va);
2470             tcg_gen_ext_i32_i64(vc, t32);
2471             break;
2472         }
2473 
2474         vb = load_gpr_lit(ctx, rb, lit, islit);
2475         switch (fn7) {
2476         case 0x00:
2477             /* SEXTB */
2478             REQUIRE_AMASK(BWX);
2479             REQUIRE_REG_31(ra);
2480             tcg_gen_ext8s_i64(vc, vb);
2481             break;
2482         case 0x01:
2483             /* SEXTW */
2484             REQUIRE_AMASK(BWX);
2485             REQUIRE_REG_31(ra);
2486             tcg_gen_ext16s_i64(vc, vb);
2487             break;
2488         case 0x30:
2489             /* CTPOP */
2490             REQUIRE_AMASK(CIX);
2491             REQUIRE_REG_31(ra);
2492             REQUIRE_NO_LIT;
2493             tcg_gen_ctpop_i64(vc, vb);
2494             break;
2495         case 0x31:
2496             /* PERR */
2497             REQUIRE_AMASK(MVI);
2498             REQUIRE_NO_LIT;
2499             va = load_gpr(ctx, ra);
2500             gen_helper_perr(vc, va, vb);
2501             break;
2502         case 0x32:
2503             /* CTLZ */
2504             REQUIRE_AMASK(CIX);
2505             REQUIRE_REG_31(ra);
2506             REQUIRE_NO_LIT;
2507             tcg_gen_clzi_i64(vc, vb, 64);
2508             break;
2509         case 0x33:
2510             /* CTTZ */
2511             REQUIRE_AMASK(CIX);
2512             REQUIRE_REG_31(ra);
2513             REQUIRE_NO_LIT;
2514             tcg_gen_ctzi_i64(vc, vb, 64);
2515             break;
2516         case 0x34:
2517             /* UNPKBW */
2518             REQUIRE_AMASK(MVI);
2519             REQUIRE_REG_31(ra);
2520             REQUIRE_NO_LIT;
2521             gen_helper_unpkbw(vc, vb);
2522             break;
2523         case 0x35:
2524             /* UNPKBL */
2525             REQUIRE_AMASK(MVI);
2526             REQUIRE_REG_31(ra);
2527             REQUIRE_NO_LIT;
2528             gen_helper_unpkbl(vc, vb);
2529             break;
2530         case 0x36:
2531             /* PKWB */
2532             REQUIRE_AMASK(MVI);
2533             REQUIRE_REG_31(ra);
2534             REQUIRE_NO_LIT;
2535             gen_helper_pkwb(vc, vb);
2536             break;
2537         case 0x37:
2538             /* PKLB */
2539             REQUIRE_AMASK(MVI);
2540             REQUIRE_REG_31(ra);
2541             REQUIRE_NO_LIT;
2542             gen_helper_pklb(vc, vb);
2543             break;
2544         case 0x38:
2545             /* MINSB8 */
2546             REQUIRE_AMASK(MVI);
2547             va = load_gpr(ctx, ra);
2548             gen_helper_minsb8(vc, va, vb);
2549             break;
2550         case 0x39:
2551             /* MINSW4 */
2552             REQUIRE_AMASK(MVI);
2553             va = load_gpr(ctx, ra);
2554             gen_helper_minsw4(vc, va, vb);
2555             break;
2556         case 0x3A:
2557             /* MINUB8 */
2558             REQUIRE_AMASK(MVI);
2559             va = load_gpr(ctx, ra);
2560             gen_helper_minub8(vc, va, vb);
2561             break;
2562         case 0x3B:
2563             /* MINUW4 */
2564             REQUIRE_AMASK(MVI);
2565             va = load_gpr(ctx, ra);
2566             gen_helper_minuw4(vc, va, vb);
2567             break;
2568         case 0x3C:
2569             /* MAXUB8 */
2570             REQUIRE_AMASK(MVI);
2571             va = load_gpr(ctx, ra);
2572             gen_helper_maxub8(vc, va, vb);
2573             break;
2574         case 0x3D:
2575             /* MAXUW4 */
2576             REQUIRE_AMASK(MVI);
2577             va = load_gpr(ctx, ra);
2578             gen_helper_maxuw4(vc, va, vb);
2579             break;
2580         case 0x3E:
2581             /* MAXSB8 */
2582             REQUIRE_AMASK(MVI);
2583             va = load_gpr(ctx, ra);
2584             gen_helper_maxsb8(vc, va, vb);
2585             break;
2586         case 0x3F:
2587             /* MAXSW4 */
2588             REQUIRE_AMASK(MVI);
2589             va = load_gpr(ctx, ra);
2590             gen_helper_maxsw4(vc, va, vb);
2591             break;
2592         default:
2593             goto invalid_opc;
2594         }
2595         break;
2596 
2597     case 0x1D:
2598         /* HW_MTPR (PALcode) */
2599 #ifndef CONFIG_USER_ONLY
2600         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2601         vb = load_gpr(ctx, rb);
2602         ret = gen_mtpr(ctx, vb, insn & 0xffff);
2603         break;
2604 #else
2605         goto invalid_opc;
2606 #endif
2607 
2608     case 0x1E:
2609         /* HW_RET (PALcode) */
2610 #ifndef CONFIG_USER_ONLY
2611         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2612         if (rb == 31) {
2613             /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
2614                address from EXC_ADDR.  This turns out to be useful for our
2615                emulation PALcode, so continue to accept it.  */
2616             vb = dest_sink(ctx);
2617             tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
2618         } else {
2619             vb = load_gpr(ctx, rb);
2620         }
2621         tcg_gen_movi_i64(cpu_lock_addr, -1);
2622         st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
2623         tmp = tcg_temp_new();
2624         tcg_gen_andi_i64(tmp, vb, 1);
2625         st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
2626         tcg_gen_andi_i64(cpu_pc, vb, ~3);
2627         /* Allow interrupts to be recognized right away.  */
2628         ret = DISAS_PC_UPDATED_NOCHAIN;
2629         break;
2630 #else
2631         goto invalid_opc;
2632 #endif
2633 
2634     case 0x1F:
2635         /* HW_ST (PALcode) */
2636 #ifndef CONFIG_USER_ONLY
2637         REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
2638         {
2639             switch ((insn >> 12) & 0xF) {
2640             case 0x0:
2641                 /* Longword physical access */
2642                 va = load_gpr(ctx, ra);
2643                 vb = load_gpr(ctx, rb);
2644                 tmp = tcg_temp_new();
2645                 tcg_gen_addi_i64(tmp, vb, disp12);
2646                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2647                 break;
2648             case 0x1:
2649                 /* Quadword physical access */
2650                 va = load_gpr(ctx, ra);
2651                 vb = load_gpr(ctx, rb);
2652                 tmp = tcg_temp_new();
2653                 tcg_gen_addi_i64(tmp, vb, disp12);
2654                 tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2655                 break;
2656             case 0x2:
2657                 /* Longword physical access with lock */
2658                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2659                                             MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
2660                 break;
2661             case 0x3:
2662                 /* Quadword physical access with lock */
2663                 ret = gen_store_conditional(ctx, ra, rb, disp12,
2664                                             MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
2665                 break;
2666             case 0x4:
2667                 /* Longword virtual access */
2668                 goto invalid_opc;
2669             case 0x5:
2670                 /* Quadword virtual access */
2671                 goto invalid_opc;
2672             case 0x6:
2673                 /* Invalid */
2674                 goto invalid_opc;
2675             case 0x7:
2676                 /* Invalid */
2677                 goto invalid_opc;
2678             case 0x8:
2679                 /* Invalid */
2680                 goto invalid_opc;
2681             case 0x9:
2682                 /* Invalid */
2683                 goto invalid_opc;
2684             case 0xA:
2685                 /* Invalid */
2686                 goto invalid_opc;
2687             case 0xB:
2688                 /* Invalid */
2689                 goto invalid_opc;
2690             case 0xC:
2691                 /* Longword virtual access with alternate access mode */
2692                 goto invalid_opc;
2693             case 0xD:
2694                 /* Quadword virtual access with alternate access mode */
2695                 goto invalid_opc;
2696             case 0xE:
2697                 /* Invalid */
2698                 goto invalid_opc;
2699             case 0xF:
2700                 /* Invalid */
2701                 goto invalid_opc;
2702             }
2703             break;
2704         }
2705 #else
2706         goto invalid_opc;
2707 #endif
2708     case 0x20:
2709         /* LDF */
2710         REQUIRE_FEN;
2711         gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
2712         break;
2713     case 0x21:
2714         /* LDG */
2715         REQUIRE_FEN;
2716         gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
2717         break;
2718     case 0x22:
2719         /* LDS */
2720         REQUIRE_FEN;
2721         gen_load_fp(ctx, ra, rb, disp16, gen_lds);
2722         break;
2723     case 0x23:
2724         /* LDT */
2725         REQUIRE_FEN;
2726         gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
2727         break;
2728     case 0x24:
2729         /* STF */
2730         REQUIRE_FEN;
2731         gen_store_fp(ctx, ra, rb, disp16, gen_stf);
2732         break;
2733     case 0x25:
2734         /* STG */
2735         REQUIRE_FEN;
2736         gen_store_fp(ctx, ra, rb, disp16, gen_stg);
2737         break;
2738     case 0x26:
2739         /* STS */
2740         REQUIRE_FEN;
2741         gen_store_fp(ctx, ra, rb, disp16, gen_sts);
2742         break;
2743     case 0x27:
2744         /* STT */
2745         REQUIRE_FEN;
2746         gen_store_fp(ctx, ra, rb, disp16, gen_stt);
2747         break;
2748     case 0x28:
2749         /* LDL */
2750         gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
2751         break;
2752     case 0x29:
2753         /* LDQ */
2754         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
2755         break;
2756     case 0x2A:
2757         /* LDL_L */
2758         gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
2759         break;
2760     case 0x2B:
2761         /* LDQ_L */
2762         gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
2763         break;
2764     case 0x2C:
2765         /* STL */
2766         gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
2767         break;
2768     case 0x2D:
2769         /* STQ */
2770         gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
2771         break;
2772     case 0x2E:
2773         /* STL_C */
2774         ret = gen_store_conditional(ctx, ra, rb, disp16,
2775                                     ctx->mem_idx, MO_LESL | MO_ALIGN);
2776         break;
2777     case 0x2F:
2778         /* STQ_C */
2779         ret = gen_store_conditional(ctx, ra, rb, disp16,
2780                                     ctx->mem_idx, MO_LEUQ | MO_ALIGN);
2781         break;
2782     case 0x30:
2783         /* BR */
2784         ret = gen_bdirect(ctx, ra, disp21);
2785         break;
2786     case 0x31: /* FBEQ */
2787         REQUIRE_FEN;
2788         ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
2789         break;
2790     case 0x32: /* FBLT */
2791         REQUIRE_FEN;
2792         ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
2793         break;
2794     case 0x33: /* FBLE */
2795         REQUIRE_FEN;
2796         ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
2797         break;
2798     case 0x34:
2799         /* BSR */
2800         ret = gen_bdirect(ctx, ra, disp21);
2801         break;
2802     case 0x35: /* FBNE */
2803         REQUIRE_FEN;
2804         ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
2805         break;
2806     case 0x36: /* FBGE */
2807         REQUIRE_FEN;
2808         ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
2809         break;
2810     case 0x37: /* FBGT */
2811         REQUIRE_FEN;
2812         ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
2813         break;
2814     case 0x38:
2815         /* BLBC */
2816         ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
2817         break;
2818     case 0x39:
2819         /* BEQ */
2820         ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
2821         break;
2822     case 0x3A:
2823         /* BLT */
2824         ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
2825         break;
2826     case 0x3B:
2827         /* BLE */
2828         ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
2829         break;
2830     case 0x3C:
2831         /* BLBS */
2832         ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
2833         break;
2834     case 0x3D:
2835         /* BNE */
2836         ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
2837         break;
2838     case 0x3E:
2839         /* BGE */
2840         ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
2841         break;
2842     case 0x3F:
2843         /* BGT */
2844         ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
2845         break;
2846     invalid_opc:
2847         ret = gen_invalid(ctx);
2848         break;
2849     raise_fen:
2850         ret = gen_excp(ctx, EXCP_FEN, 0);
2851         break;
2852     }
2853 
2854     return ret;
2855 }
2856 
2857 static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
2858 {
2859     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2860     CPUAlphaState *env = cpu_env(cpu);
2861     int64_t bound;
2862 
2863     ctx->tbflags = ctx->base.tb->flags;
2864     ctx->mem_idx = alpha_env_mmu_index(env);
2865     ctx->implver = env->implver;
2866     ctx->amask = env->amask;
2867 
2868 #ifdef CONFIG_USER_ONLY
2869     ctx->ir = cpu_std_ir;
2870     ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
2871 #else
2872     ctx->palbr = env->palbr;
2873     ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
2874 #endif
2875 
2876     /* ??? Every TB begins with unset rounding mode, to be initialized on
2877        the first fp insn of the TB.  Alternately we could define a proper
2878        default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
2879        to reset the FP_STATUS to that default at the end of any TB that
2880        changes the default.  We could even (gasp) dynamically figure out
2881        what default would be most efficient given the running program.  */
2882     ctx->tb_rm = -1;
2883     /* Similarly for flush-to-zero.  */
2884     ctx->tb_ftz = -1;
2885 
2886     ctx->zero = NULL;
2887     ctx->sink = NULL;
2888 
2889     /* Bound the number of insns to execute to those left on the page.  */
2890     bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
2891     ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2892 }
2893 
/* TranslatorOps tb_start hook: Alpha needs no per-TB prologue.  */
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
2897 
/* TranslatorOps insn_start hook: record the guest PC of the insn
   about to be translated, for exception restart bookkeeping.  */
static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
2902 
2903 static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
2904 {
2905     DisasContext *ctx = container_of(dcbase, DisasContext, base);
2906     CPUAlphaState *env = cpu_env(cpu);
2907     uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
2908 
2909     ctx->base.pc_next += 4;
2910     ctx->base.is_jmp = translate_one(ctx, insn);
2911 
2912     free_context_temps(ctx);
2913 }
2914 
/* TranslatorOps tb_stop hook: emit the TB epilogue appropriate to
   the reason translation stopped.  Note the deliberate fallthrough
   chain: TOO_MANY -> PC_STALE -> PC_UPDATED.  */
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        /* The insn already emitted its own TB exit; nothing to do.  */
        break;
    case DISAS_TOO_MANY:
        /* Ran out of insn budget; the next PC is known statically,
           so attempt direct TB chaining first.  */
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        /* cpu_pc was not updated by the insn; store the known next PC
           before the indirect jump below.  */
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* cpu_pc now holds the next PC; chain via the TB lookup helper.  */
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        /* Return to the main loop without chaining.  */
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
2942 
2943 static void alpha_tr_disas_log(const DisasContextBase *dcbase,
2944                                CPUState *cpu, FILE *logfile)
2945 {
2946     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2947     target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
2948 }
2949 
/* Hook table handed to the generic translator loop below.  */
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};
2958 
2959 void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
2960                            vaddr pc, void *host_pc)
2961 {
2962     DisasContext dc;
2963     translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
2964 }
2965