/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;
    TCGOp *insn_start;

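    /*
     * iaoq_f and iaoq_b mirror the front and back of the instruction
     * address offset queue; iaoq_n holds the offset that follows iaoq_b.
     * A value of -1 means the offset is not known at translation time
     * and is tracked in iaoq_n_var instead.
     */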
    uint64_t iaoq_f;
    uint64_t iaoq_b;
    uint64_t iaoq_n;
    TCGv_i64 iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}

/* The space register is passed inverted, so that a field value of 0
   means sr0 rather than a space inferred from the base register.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
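/* Thus m=0 yields 0 (no base update), m=1,a=0 yields +1 (post-modify),
   and m=1,a=1 yields -1 (pre-modify).  */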

/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
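/* For example, x=1, y=0 (val = 32) gives (32 ^ 31) + 1 = 64 = 32 * 1 + 32,
   and x=0, y=5 (val = 5) gives (5 ^ 31) + 1 = 27 = 32 - 5.  */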

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}
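/* The doubleword encoding thus repurposes "never" as the unsigned
   "*<<" test, standard condition 4.  */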


/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(ctx->insn_start != NULL);
    tcg_set_insn_start_param(ctx->insn_start, 2, breg);
    ctx->insn_start = NULL;
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}

static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}
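/* When the current insn may yet be nullified, the result is computed
   into a temporary; save_gpr then commits it with a conditional move
   via save_or_nullify.  */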

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            uint64_t ival, TCGv_i64 vval)
{
    uint64_t mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_i64(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_i64(dest, vval);
    } else {
        tcg_gen_andi_i64(dest, vval, mask);
    }
}

static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}
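/* PA-RISC branch displacements are measured from the instruction
   following the branch's delay slot, hence the +8.  */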

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        uint64_t f, uint64_t b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_i64. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}
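/* E.g. on pa2.0 a word (d == 0) condition examines only the low 32
   bits, so values are extended before the 64-bit comparison.  */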

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the PA-RISC 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 cb_msb, TCGv_i64 sv)
{
    DisasCond cond;
    TCGv_i64 tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32u_i64(tmp, res);
            res = tmp;
        }
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32s_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_eqv_i64(tmp, res, sv);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_sextract_i64(tmp, tmp, 31, 1);
            tcg_gen_and_i64(tmp, tmp, res);
            tcg_gen_ext32u_i64(tmp, tmp);
        } else {
            tcg_gen_sari_i64(tmp, tmp, 63);
            tcg_gen_and_i64(tmp, tmp, res);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        /* Only bit 0 of cb_msb is ever set. */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_neg_i64(tmp, cb_msb);
        tcg_gen_and_i64(tmp, tmp, res);
        if (cond_need_ext(ctx, d)) {
            tcg_gen_ext32u_i64(tmp, tmp);
        }
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        if (cond_need_ext(ctx, d)) {
            tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, sv);
            sv = tmp;
        }
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new_i64();
        tcg_gen_andi_i64(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tmp(tc, t1, t2);
    }
    return cond_make(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 3:  /* <> */
        tc = TCG_COND_NE;
        ext_uns = true;
        break;
    case 4:  /* < */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 5:  /* >= */
        tc = TCG_COND_GE;
        ext_uns = false;
        break;
    case 6:  /* <= */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 7:  /* > */
        tc = TCG_COND_GT;
        ext_uns = false;
        break;

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(ctx, cf, d, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }

    if (cond_need_ext(ctx, d)) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(tmp, res);
        } else {
            tcg_gen_ext32s_i64(tmp, res);
        }
        return cond_make_0_tmp(tc, tmp);
    }
    return cond_make_0(tc, res);
}

/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,=,<), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}
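/* For example, orig = 3 maps to cf = 14 (OD) and orig = 7 to cf = 15 (EV).  */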

/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, bool d, TCGv_i64 res,
                              TCGv_i64 in1, TCGv_i64 in2)
{
    DisasCond cond;
    TCGv_i64 tmp, cb = NULL;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new_i64();
        tmp = tcg_temp_new_i64();
        tcg_gen_or_i64(cb, in1, in2);
        tcg_gen_and_i64(tmp, in1, in2);
        tcg_gen_andc_i64(cb, cb, res);
        tcg_gen_or_i64(cb, cb, tmp);
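        /* This computes the bitwise carry-out of IN1 + IN2:
           cb = (in1 & in2) | ((in1 | in2) & ~res).  */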
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
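        /* E.g. res = 0x12003456: res - 0x01010101 = 0x10FF3355, and
           0x10FF3355 & ~res & 0x80808080 = 0x00800000 != 0, detecting
           the zero byte.  */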
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x01010101u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new_i64();
        tcg_gen_subi_i64(tmp, res, d_repl * 0x00010001u);
        tcg_gen_andc_i64(tmp, tmp, res);
        tcg_gen_andi_i64(tmp, tmp, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_i64(cb, cb, d_repl * 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

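/* For a 32-bit operation the carry out of bit 31 is kept at bit 32 of
   the carry vector CB, while for a 64-bit operation it is tracked
   separately in CB_MSB.  */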
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition.  */
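/* The sign bit of SV is set iff IN1 and IN2 have the same sign and
   the sign of the result differs.  */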
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
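/* Here the sign bit of SV is set iff the operands differ in sign and
   the sign of the result differs from IN1.  */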
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
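        /* The high word computes {1:in1} - {0:in2}; its result,
           1 - borrow, is exactly the carry-out of IN1 + ~IN2 + 1.  */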
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(ctx, cf, d, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                    TCGv_i64 in2, unsigned cf, bool d, bool is_tc,
                    void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new_i64();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, d, dest, in1, in2);

        if (is_tc) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_setcond_i64(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
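    /* For post-modify (modify > 0) the access uses the unmodified base;
       otherwise it uses the displaced offset.  */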
1356      tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base, gva_offset_mask(ctx));
1357  #ifndef CONFIG_USER_ONLY
1358      if (!is_phys) {
1359          tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1360      }
1361  #endif
1362  }
1363  
1364  /* Emit a memory load.  The modify parameter should be
1365   * < 0 for pre-modify,
1366   * > 0 for post-modify,
1367   * = 0 for no base register update.
1368   */
1369  static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1370                         unsigned rx, int scale, int64_t disp,
1371                         unsigned sp, int modify, MemOp mop)
1372  {
1373      TCGv_i64 ofs;
1374      TCGv_i64 addr;
1375  
1376      /* Caller uses nullify_over/nullify_end.  */
1377      assert(ctx->null_cond.c == TCG_COND_NEVER);
1378  
1379      form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1380               MMU_DISABLED(ctx));
1381      tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1382      if (modify) {
1383          save_gpr(ctx, rb, ofs);
1384      }
1385  }
1386  
1387  static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1388                         unsigned rx, int scale, int64_t disp,
1389                         unsigned sp, int modify, MemOp mop)
1390  {
1391      TCGv_i64 ofs;
1392      TCGv_i64 addr;
1393  
1394      /* Caller uses nullify_over/nullify_end.  */
1395      assert(ctx->null_cond.c == TCG_COND_NEVER);
1396  
1397      form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1398               MMU_DISABLED(ctx));
1399      tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1400      if (modify) {
1401          save_gpr(ctx, rb, ofs);
1402      }
1403  }
1404  
1405  static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1406                          unsigned rx, int scale, int64_t disp,
1407                          unsigned sp, int modify, MemOp mop)
1408  {
1409      TCGv_i64 ofs;
1410      TCGv_i64 addr;
1411  
1412      /* Caller uses nullify_over/nullify_end.  */
1413      assert(ctx->null_cond.c == TCG_COND_NEVER);
1414  
1415      form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1416               MMU_DISABLED(ctx));
1417      tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1418      if (modify) {
1419          save_gpr(ctx, rb, ofs);
1420      }
1421  }
1422  
1423  static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1424                          unsigned rx, int scale, int64_t disp,
1425                          unsigned sp, int modify, MemOp mop)
1426  {
1427      TCGv_i64 ofs;
1428      TCGv_i64 addr;
1429  
1430      /* Caller uses nullify_over/nullify_end.  */
1431      assert(ctx->null_cond.c == TCG_COND_NEVER);
1432  
1433      form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1434               MMU_DISABLED(ctx));
1435      tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1436      if (modify) {
1437          save_gpr(ctx, rb, ofs);
1438      }
1439  }
1440  
1441  static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1442                      unsigned rx, int scale, int64_t disp,
1443                      unsigned sp, int modify, MemOp mop)
1444  {
1445      TCGv_i64 dest;
1446  
1447      nullify_over(ctx);
1448  
1449      if (modify == 0) {
1450          /* No base register update.  */
1451          dest = dest_gpr(ctx, rt);
1452      } else {
1453          /* Make sure if RT == RB, we see the result of the load.  */
1454          dest = tcg_temp_new_i64();
1455      }
1456      do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1457      save_gpr(ctx, rt, dest);
1458  
1459      return nullify_end(ctx);
1460  }
1461  
1462  static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1463                        unsigned rx, int scale, int64_t disp,
1464                        unsigned sp, int modify)
1465  {
1466      TCGv_i32 tmp;
1467  
1468      nullify_over(ctx);
1469  
1470      tmp = tcg_temp_new_i32();
1471      do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1472      save_frw_i32(rt, tmp);
1473  
1474      if (rt == 0) {
1475          gen_helper_loaded_fr0(tcg_env);
1476      }
1477  
1478      return nullify_end(ctx);
1479  }
1480  
1481  static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1482  {
1483      return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1484                       a->disp, a->sp, a->m);
1485  }
1486  
1487  static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1488                        unsigned rx, int scale, int64_t disp,
1489                        unsigned sp, int modify)
1490  {
1491      TCGv_i64 tmp;
1492  
1493      nullify_over(ctx);
1494  
1495      tmp = tcg_temp_new_i64();
1496      do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1497      save_frd(rt, tmp);
1498  
1499      if (rt == 0) {
1500          gen_helper_loaded_fr0(tcg_env);
1501      }
1502  
1503      return nullify_end(ctx);
1504  }
1505  
1506  static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1507  {
1508      return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1509                       a->disp, a->sp, a->m);
1510  }
1511  
1512  static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1513                       int64_t disp, unsigned sp,
1514                       int modify, MemOp mop)
1515  {
1516      nullify_over(ctx);
1517      do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1518      return nullify_end(ctx);
1519  }
1520  
1521  static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1522                         unsigned rx, int scale, int64_t disp,
1523                         unsigned sp, int modify)
1524  {
1525      TCGv_i32 tmp;
1526  
1527      nullify_over(ctx);
1528  
1529      tmp = load_frw_i32(rt);
1530      do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1531  
1532      return nullify_end(ctx);
1533  }
1534  
1535  static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1536  {
1537      return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1538                        a->disp, a->sp, a->m);
1539  }
1540  
1541  static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1542                         unsigned rx, int scale, int64_t disp,
1543                         unsigned sp, int modify)
1544  {
1545      TCGv_i64 tmp;
1546  
1547      nullify_over(ctx);
1548  
1549      tmp = load_frd(rt);
1550      do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1551  
1552      return nullify_end(ctx);
1553  }
1554  
1555  static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1556  {
1557      return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1558                        a->disp, a->sp, a->m);
1559  }
1560  
1561  static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1562                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1563  {
1564      TCGv_i32 tmp;
1565  
1566      nullify_over(ctx);
1567      tmp = load_frw0_i32(ra);
1568  
1569      func(tmp, tcg_env, tmp);
1570  
1571      save_frw_i32(rt, tmp);
1572      return nullify_end(ctx);
1573  }
1574  
1575  static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1576                         void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1577  {
1578      TCGv_i32 dst;
1579      TCGv_i64 src;
1580  
1581      nullify_over(ctx);
1582      src = load_frd(ra);
1583      dst = tcg_temp_new_i32();
1584  
1585      func(dst, tcg_env, src);
1586  
1587      save_frw_i32(rt, dst);
1588      return nullify_end(ctx);
1589  }
1590  
1591  static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1592                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1593  {
1594      TCGv_i64 tmp;
1595  
1596      nullify_over(ctx);
1597      tmp = load_frd0(ra);
1598  
1599      func(tmp, tcg_env, tmp);
1600  
1601      save_frd(rt, tmp);
1602      return nullify_end(ctx);
1603  }
1604  
1605  static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1606                         void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1607  {
1608      TCGv_i32 src;
1609      TCGv_i64 dst;
1610  
1611      nullify_over(ctx);
1612      src = load_frw0_i32(ra);
1613      dst = tcg_temp_new_i64();
1614  
1615      func(dst, tcg_env, src);
1616  
1617      save_frd(rt, dst);
1618      return nullify_end(ctx);
1619  }
1620  
1621  static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1622                          unsigned ra, unsigned rb,
1623                          void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1624  {
1625      TCGv_i32 a, b;
1626  
1627      nullify_over(ctx);
1628      a = load_frw0_i32(ra);
1629      b = load_frw0_i32(rb);
1630  
1631      func(a, tcg_env, a, b);
1632  
1633      save_frw_i32(rt, a);
1634      return nullify_end(ctx);
1635  }
1636  
1637  static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1638                          unsigned ra, unsigned rb,
1639                          void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1640  {
1641      TCGv_i64 a, b;
1642  
1643      nullify_over(ctx);
1644      a = load_frd0(ra);
1645      b = load_frd0(rb);
1646  
1647      func(a, tcg_env, a, b);
1648  
1649      save_frd(rt, a);
1650      return nullify_end(ctx);
1651  }
1652  
1653  /* Emit an unconditional branch to a direct target, which may or may not
1654     have already had nullification handled.  */
1655  static bool do_dbranch(DisasContext *ctx, uint64_t dest,
1656                         unsigned link, bool is_n)
1657  {
1658      if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1659          if (link != 0) {
1660              copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1661          }
1662          ctx->iaoq_n = dest;
1663          if (is_n) {
1664              ctx->null_cond.c = TCG_COND_ALWAYS;
1665          }
1666      } else {
1667          nullify_over(ctx);
1668  
1669          if (link != 0) {
1670              copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1671          }
1672  
1673          if (is_n && use_nullify_skip(ctx)) {
1674              nullify_set(ctx, 0);
1675              gen_goto_tb(ctx, 0, dest, dest + 4);
1676          } else {
1677              nullify_set(ctx, is_n);
1678              gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1679          }
1680  
1681          nullify_end(ctx);
1682  
1683          nullify_set(ctx, 0);
1684          gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
1685          ctx->base.is_jmp = DISAS_NORETURN;
1686      }
1687      return true;
1688  }
1689  
1690  /* Emit a conditional branch to a direct target.  If the branch itself
1691     is nullified, we should have already used nullify_over.  */
1692  static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1693                         DisasCond *cond)
1694  {
1695      uint64_t dest = iaoq_dest(ctx, disp);
1696      TCGLabel *taken = NULL;
1697      TCGCond c = cond->c;
1698      bool n;
1699  
1700      assert(ctx->null_cond.c == TCG_COND_NEVER);
1701  
1702      /* Handle TRUE and NEVER as direct branches.  */
1703      if (c == TCG_COND_ALWAYS) {
1704          return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
1705      }
1706      if (c == TCG_COND_NEVER) {
1707          return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
1708      }
1709  
1710      taken = gen_new_label();
1711      tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1712      cond_free(cond);
1713  
1714      /* Not taken: Condition not satisfied; nullify on backward branches. */
1715      n = is_n && disp < 0;
1716      if (n && use_nullify_skip(ctx)) {
1717          nullify_set(ctx, 0);
1718          gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
1719      } else {
1720          if (!n && ctx->null_lab) {
1721              gen_set_label(ctx->null_lab);
1722              ctx->null_lab = NULL;
1723          }
1724          nullify_set(ctx, n);
1725          if (ctx->iaoq_n == -1) {
1726              /* The temporary iaoq_n_var died at the branch above.
1727                 Regenerate it here instead of saving it.  */
1728              tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1729          }
1730          gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
1731      }
1732  
1733      gen_set_label(taken);
1734  
1735      /* Taken: Condition satisfied; nullify on forward branches.  */
1736      n = is_n && disp >= 0;
1737      if (n && use_nullify_skip(ctx)) {
1738          nullify_set(ctx, 0);
1739          gen_goto_tb(ctx, 1, dest, dest + 4);
1740      } else {
1741          nullify_set(ctx, n);
1742          gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
1743      }
1744  
1745      /* Not taken: the branch itself was nullified.  */
1746      if (ctx->null_lab) {
1747          gen_set_label(ctx->null_lab);
1748          ctx->null_lab = NULL;
1749          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1750      } else {
1751          ctx->base.is_jmp = DISAS_NORETURN;
1752      }
1753      return true;
1754  }
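
      /*
       * Hedged summary of the nullification rule encoded above: with the
       * ,N completer, a taken forward branch and an untaken backward
       * branch nullify the following insn.  For a typical loop closed by
       * a backward CMPB,<,N, the in-loop (taken) path executes the delay
       * slot while the fall-through path skips it.
       */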
1755  
1756  /* Emit an unconditional branch to an indirect target.  This handles
1757     nullification of the branch itself.  */
1758  static bool do_ibranch(DisasContext *ctx, TCGv_i64 dest,
1759                         unsigned link, bool is_n)
1760  {
1761      TCGv_i64 a0, a1, next, tmp;
1762      TCGCond c;
1763  
1764      assert(ctx->null_lab == NULL);
1765  
1766      if (ctx->null_cond.c == TCG_COND_NEVER) {
1767          if (link != 0) {
1768              copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1769          }
1770          next = tcg_temp_new_i64();
1771          tcg_gen_mov_i64(next, dest);
1772          if (is_n) {
1773              if (use_nullify_skip(ctx)) {
1774                  copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1775                  tcg_gen_addi_i64(next, next, 4);
1776                  copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1777                  nullify_set(ctx, 0);
1778                  ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1779                  return true;
1780              }
1781              ctx->null_cond.c = TCG_COND_ALWAYS;
1782          }
1783          ctx->iaoq_n = -1;
1784          ctx->iaoq_n_var = next;
1785      } else if (is_n && use_nullify_skip(ctx)) {
1786          /* The (conditional) branch, B, nullifies the next insn, N,
1787         and we're allowed to skip execution of N (no single-step or
1788             tracepoint in effect).  Since the goto_ptr that we must use
1789             for the indirect branch consumes no special resources, we
1790             can (conditionally) skip B and continue execution.  */
1791          /* The use_nullify_skip test implies we have a known control path.  */
1792          tcg_debug_assert(ctx->iaoq_b != -1);
1793          tcg_debug_assert(ctx->iaoq_n != -1);
1794  
1795          /* We do have to handle the non-local temporary, DEST, before
1796             branching.  Since IAOQ_F is not really live at this point, we
1797             can simply store DEST optimistically.  Similarly with IAOQ_B.  */
1798          copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1799          next = tcg_temp_new_i64();
1800          tcg_gen_addi_i64(next, dest, 4);
1801          copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
1802  
1803          nullify_over(ctx);
1804          if (link != 0) {
1805              copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1806          }
1807          tcg_gen_lookup_and_goto_ptr();
1808          return nullify_end(ctx);
1809      } else {
1810          c = ctx->null_cond.c;
1811          a0 = ctx->null_cond.a0;
1812          a1 = ctx->null_cond.a1;
1813  
1814          tmp = tcg_temp_new_i64();
1815          next = tcg_temp_new_i64();
1816  
1817          copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
1818          tcg_gen_movcond_i64(c, next, a0, a1, tmp, dest);
1819          ctx->iaoq_n = -1;
1820          ctx->iaoq_n_var = next;
1821  
1822          if (link != 0) {
1823              tcg_gen_movcond_i64(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
1824          }
1825  
1826          if (is_n) {
1827              /* The branch nullifies the next insn, which means the state of N
1828                 after the branch is the inverse of the state of N that applied
1829                 to the branch.  */
1830              tcg_gen_setcond_i64(tcg_invert_cond(c), cpu_psw_n, a0, a1);
1831              cond_free(&ctx->null_cond);
1832              ctx->null_cond = cond_make_n();
1833              ctx->psw_n_nonzero = true;
1834          } else {
1835              cond_free(&ctx->null_cond);
1836          }
1837      }
1838      return true;
1839  }
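
      /*
       * Sketch of the conditional case above, with an assumed null_cond
       * of (r5 == 0): the movcond computes
       *
       *     next = (r5 == 0 ? iaoq_n : dest)
       *
       * so both outcomes flow through a single exit, at the price of
       * leaving iaoq_n unknown (-1) for the remainder of the TB.
       */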
1840  
1841  /* Implement
1842   *    if (IAOQ_Front{30..31} < GR[b]{30..31})
1843   *      IAOQ_Next{30..31} ← GR[b]{30..31};
1844   *    else
1845   *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1846   * which keeps the privilege level from being increased.
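       *
       * Worked example with hypothetical values: at privilege 1, a target
       * with low bits 3 becomes max(1, 3) = 3 (a legal drop in privilege),
       * while a target with low bits 0 is forced back to 1, so a branch
       * can never raise the privilege level this way.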
1847   */
1848  static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
1849  {
1850      TCGv_i64 dest;
1851      switch (ctx->privilege) {
1852      case 0:
1853          /* Privilege 0 is maximum and is allowed to decrease.  */
1854          return offset;
1855      case 3:
1856          /* Privilege 3 is minimum and is never allowed to increase.  */
1857          dest = tcg_temp_new_i64();
1858          tcg_gen_ori_i64(dest, offset, 3);
1859          break;
1860      default:
1861          dest = tcg_temp_new_i64();
1862          tcg_gen_andi_i64(dest, offset, -4);
1863          tcg_gen_ori_i64(dest, dest, ctx->privilege);
1864          tcg_gen_movcond_i64(TCG_COND_GTU, dest, dest, offset, dest, offset);
1865          break;
1866      }
1867      return dest;
1868  }
1869  
1870  #ifdef CONFIG_USER_ONLY
1871  /* On Linux, page zero is normally marked execute only + gateway.
1872     Therefore normal read or write is supposed to fail, but specific
1873     offsets have kernel code mapped to raise permissions to implement
1874     system calls.  Handling this via an explicit check here, rather
1875     than in the "be disp(sr2,r0)" instruction that probably sent us
1876     here, is the easiest way to handle the branch delay slot on the
1877     aforementioned BE.  */
1878  static void do_page_zero(DisasContext *ctx)
1879  {
1880      TCGv_i64 tmp;
1881  
1882      /* If by some means we get here with PSW[N]=1, that implies that
1883         the B,GATE instruction would be skipped, and we'd fault on the
1884         next insn within the privileged page.  */
1885      switch (ctx->null_cond.c) {
1886      case TCG_COND_NEVER:
1887          break;
1888      case TCG_COND_ALWAYS:
1889          tcg_gen_movi_i64(cpu_psw_n, 0);
1890          goto do_sigill;
1891      default:
1892          /* Since this is always the first (and only) insn within the
1893             TB, we should know the state of PSW[N] from TB->FLAGS.  */
1894          g_assert_not_reached();
1895      }
1896  
1897      /* Check that we didn't arrive here via some means that allowed
1898         non-sequential instruction execution.  Normally the PSW[B] bit
1899         detects this by preventing the B,GATE instruction from executing
1900         under such conditions.  */
1901      if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1902          goto do_sigill;
1903      }
1904  
1905      switch (ctx->iaoq_f & -4) {
1906      case 0x00: /* Null pointer call */
1907          gen_excp_1(EXCP_IMP);
1908          ctx->base.is_jmp = DISAS_NORETURN;
1909          break;
1910  
1911      case 0xb0: /* LWS */
1912          gen_excp_1(EXCP_SYSCALL_LWS);
1913          ctx->base.is_jmp = DISAS_NORETURN;
1914          break;
1915  
1916      case 0xe0: /* SET_THREAD_POINTER */
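              /*
               * Assumed Linux gateway-page ABI: %r26 carries the new TLS
               * pointer, stored into %cr27 below, and the return address
               * in %r31 has its privilege bits forced to 3 before being
               * installed as the new IAOQ.
               */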
1917          tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
1918          tmp = tcg_temp_new_i64();
1919          tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
1920          copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
1921          tcg_gen_addi_i64(tmp, tmp, 4);
1922          copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
1923          ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
1924          break;
1925  
1926      case 0x100: /* SYSCALL */
1927          gen_excp_1(EXCP_SYSCALL);
1928          ctx->base.is_jmp = DISAS_NORETURN;
1929          break;
1930  
1931      default:
1932      do_sigill:
1933          gen_excp_1(EXCP_ILL);
1934          ctx->base.is_jmp = DISAS_NORETURN;
1935          break;
1936      }
1937  }
1938  #endif
1939  
1940  static bool trans_nop(DisasContext *ctx, arg_nop *a)
1941  {
1942      cond_free(&ctx->null_cond);
1943      return true;
1944  }
1945  
1946  static bool trans_break(DisasContext *ctx, arg_break *a)
1947  {
1948      return gen_excp_iir(ctx, EXCP_BREAK);
1949  }
1950  
1951  static bool trans_sync(DisasContext *ctx, arg_sync *a)
1952  {
1953      /* No point in nullifying the memory barrier.  */
1954      tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
1955  
1956      cond_free(&ctx->null_cond);
1957      return true;
1958  }
1959  
1960  static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
1961  {
1962      unsigned rt = a->t;
1963      TCGv_i64 tmp = dest_gpr(ctx, rt);
1964      tcg_gen_movi_i64(tmp, ctx->iaoq_f);
1965      save_gpr(ctx, rt, tmp);
1966  
1967      cond_free(&ctx->null_cond);
1968      return true;
1969  }
1970  
1971  static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
1972  {
1973      unsigned rt = a->t;
1974      unsigned rs = a->sp;
1975      TCGv_i64 t0 = tcg_temp_new_i64();
1976  
1977      load_spr(ctx, t0, rs);
1978      tcg_gen_shri_i64(t0, t0, 32);
1979  
1980      save_gpr(ctx, rt, t0);
1981  
1982      cond_free(&ctx->null_cond);
1983      return true;
1984  }
1985  
1986  static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
1987  {
1988      unsigned rt = a->t;
1989      unsigned ctl = a->r;
1990      TCGv_i64 tmp;
1991  
1992      switch (ctl) {
1993      case CR_SAR:
1994          if (a->e == 0) {
1995              /* MFSAR without ,W masks low 5 bits.  */
1996              tmp = dest_gpr(ctx, rt);
1997              tcg_gen_andi_i64(tmp, cpu_sar, 31);
1998              save_gpr(ctx, rt, tmp);
1999              goto done;
2000          }
2001          save_gpr(ctx, rt, cpu_sar);
2002          goto done;
2003      case CR_IT: /* Interval Timer */
2004          /* FIXME: Respect PSW_S bit.  */
2005          nullify_over(ctx);
2006          tmp = dest_gpr(ctx, rt);
2007          if (translator_io_start(&ctx->base)) {
2008              gen_helper_read_interval_timer(tmp);
2009              ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2010          } else {
2011              gen_helper_read_interval_timer(tmp);
2012          }
2013          save_gpr(ctx, rt, tmp);
2014          return nullify_end(ctx);
2015      case 26:
2016      case 27:
2017          break;
2018      default:
2019          /* All other control registers are privileged.  */
2020          CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2021          break;
2022      }
2023  
2024      tmp = tcg_temp_new_i64();
2025      tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2026      save_gpr(ctx, rt, tmp);
2027  
2028   done:
2029      cond_free(&ctx->null_cond);
2030      return true;
2031  }
2032  
2033  static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2034  {
2035      unsigned rr = a->r;
2036      unsigned rs = a->sp;
2037      TCGv_i64 tmp;
2038  
2039      if (rs >= 5) {
2040          CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2041      }
2042      nullify_over(ctx);
2043  
2044      tmp = tcg_temp_new_i64();
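          /*
           * Space registers are kept left-justified in their 64-bit slot
           * (note the matching shift right by 32 in trans_mfsp), so the
           * new value moves into the high half before being installed.
           */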
2045      tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2046  
2047      if (rs >= 4) {
2048          tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2049          ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2050      } else {
2051          tcg_gen_mov_i64(cpu_sr[rs], tmp);
2052      }
2053  
2054      return nullify_end(ctx);
2055  }
2056  
2057  static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2058  {
2059      unsigned ctl = a->t;
2060      TCGv_i64 reg;
2061      TCGv_i64 tmp;
2062  
2063      if (ctl == CR_SAR) {
2064          reg = load_gpr(ctx, a->r);
2065          tmp = tcg_temp_new_i64();
2066          tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2067          save_or_nullify(ctx, cpu_sar, tmp);
2068  
2069          cond_free(&ctx->null_cond);
2070          return true;
2071      }
2072  
2073      /* All other control registers are privileged or read-only.  */
2074      CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2075  
2076  #ifndef CONFIG_USER_ONLY
2077      nullify_over(ctx);
2078  
2079      if (ctx->is_pa20) {
2080          reg = load_gpr(ctx, a->r);
2081      } else {
2082          reg = tcg_temp_new_i64();
2083          tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2084      }
2085  
2086      switch (ctl) {
2087      case CR_IT:
2088          gen_helper_write_interval_timer(tcg_env, reg);
2089          break;
2090      case CR_EIRR:
2091          gen_helper_write_eirr(tcg_env, reg);
2092          break;
2093      case CR_EIEM:
2094          gen_helper_write_eiem(tcg_env, reg);
2095          ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2096          break;
2097  
2098      case CR_IIASQ:
2099      case CR_IIAOQ:
2100          /* FIXME: Respect PSW_Q bit.  */
2101          /* The write advances the queue and stores to the back element.  */
2102          tmp = tcg_temp_new_i64();
2103          tcg_gen_ld_i64(tmp, tcg_env,
2104                         offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2105          tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2106          tcg_gen_st_i64(reg, tcg_env,
2107                         offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2108          break;
2109  
2110      case CR_PID1:
2111      case CR_PID2:
2112      case CR_PID3:
2113      case CR_PID4:
2114          tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2115  #ifndef CONFIG_USER_ONLY
2116          gen_helper_change_prot_id(tcg_env);
2117  #endif
2118          break;
2119  
2120      default:
2121          tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2122          break;
2123      }
2124      return nullify_end(ctx);
2125  #endif
2126  }
2127  
2128  static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2129  {
2130      TCGv_i64 tmp = tcg_temp_new_i64();
2131  
2132      tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2133      tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2134      save_or_nullify(ctx, cpu_sar, tmp);
2135  
2136      cond_free(&ctx->null_cond);
2137      return true;
2138  }
2139  
2140  static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2141  {
2142      TCGv_i64 dest = dest_gpr(ctx, a->t);
2143  
2144  #ifdef CONFIG_USER_ONLY
2145      /* We don't implement space registers in user mode. */
2146      tcg_gen_movi_i64(dest, 0);
2147  #else
2148      tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2149      tcg_gen_shri_i64(dest, dest, 32);
2150  #endif
2151      save_gpr(ctx, a->t, dest);
2152  
2153      cond_free(&ctx->null_cond);
2154      return true;
2155  }
2156  
2157  static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2158  {
2159      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2160  #ifndef CONFIG_USER_ONLY
2161      TCGv_i64 tmp;
2162  
2163      nullify_over(ctx);
2164  
2165      tmp = tcg_temp_new_i64();
2166      tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2167      tcg_gen_andi_i64(tmp, tmp, ~a->i);
2168      gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2169      save_gpr(ctx, a->t, tmp);
2170  
2171      /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
2172      ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2173      return nullify_end(ctx);
2174  #endif
2175  }
2176  
2177  static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2178  {
2179      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2180  #ifndef CONFIG_USER_ONLY
2181      TCGv_i64 tmp;
2182  
2183      nullify_over(ctx);
2184  
2185      tmp = tcg_temp_new_i64();
2186      tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2187      tcg_gen_ori_i64(tmp, tmp, a->i);
2188      gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2189      save_gpr(ctx, a->t, tmp);
2190  
2191      /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
2192      ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2193      return nullify_end(ctx);
2194  #endif
2195  }
2196  
2197  static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2198  {
2199      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2200  #ifndef CONFIG_USER_ONLY
2201      TCGv_i64 tmp, reg;
2202      nullify_over(ctx);
2203  
2204      reg = load_gpr(ctx, a->r);
2205      tmp = tcg_temp_new_i64();
2206      gen_helper_swap_system_mask(tmp, tcg_env, reg);
2207  
2208      /* Exit the TB to recognize new interrupts.  */
2209      ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2210      return nullify_end(ctx);
2211  #endif
2212  }
2213  
2214  static bool do_rfi(DisasContext *ctx, bool rfi_r)
2215  {
2216      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2217  #ifndef CONFIG_USER_ONLY
2218      nullify_over(ctx);
2219  
2220      if (rfi_r) {
2221          gen_helper_rfi_r(tcg_env);
2222      } else {
2223          gen_helper_rfi(tcg_env);
2224      }
2225      /* Exit the TB to recognize new interrupts.  */
2226      tcg_gen_exit_tb(NULL, 0);
2227      ctx->base.is_jmp = DISAS_NORETURN;
2228  
2229      return nullify_end(ctx);
2230  #endif
2231  }
2232  
2233  static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2234  {
2235      return do_rfi(ctx, false);
2236  }
2237  
2238  static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2239  {
2240      return do_rfi(ctx, true);
2241  }
2242  
2243  static bool trans_halt(DisasContext *ctx, arg_halt *a)
2244  {
2245      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2246  #ifndef CONFIG_USER_ONLY
2247      nullify_over(ctx);
2248      gen_helper_halt(tcg_env);
2249      ctx->base.is_jmp = DISAS_NORETURN;
2250      return nullify_end(ctx);
2251  #endif
2252  }
2253  
2254  static bool trans_reset(DisasContext *ctx, arg_reset *a)
2255  {
2256      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2257  #ifndef CONFIG_USER_ONLY
2258      nullify_over(ctx);
2259      gen_helper_reset(tcg_env);
2260      ctx->base.is_jmp = DISAS_NORETURN;
2261      return nullify_end(ctx);
2262  #endif
2263  }
2264  
2265  static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2266  {
2267      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2268  #ifndef CONFIG_USER_ONLY
2269      nullify_over(ctx);
2270      gen_helper_getshadowregs(tcg_env);
2271      return nullify_end(ctx);
2272  #endif
2273  }
2274  
2275  static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2276  {
2277      if (a->m) {
2278          TCGv_i64 dest = dest_gpr(ctx, a->b);
2279          TCGv_i64 src1 = load_gpr(ctx, a->b);
2280          TCGv_i64 src2 = load_gpr(ctx, a->x);
2281  
2282          /* The only thing we need to do is the base register modification.  */
2283          tcg_gen_add_i64(dest, src1, src2);
2284          save_gpr(ctx, a->b, dest);
2285      }
2286      cond_free(&ctx->null_cond);
2287      return true;
2288  }
2289  
2290  static bool trans_probe(DisasContext *ctx, arg_probe *a)
2291  {
2292      TCGv_i64 dest, ofs;
2293      TCGv_i32 level, want;
2294      TCGv_i64 addr;
2295  
2296      nullify_over(ctx);
2297  
2298      dest = dest_gpr(ctx, a->t);
2299      form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2300  
2301      if (a->imm) {
2302          level = tcg_constant_i32(a->ri & 3);
2303      } else {
2304          level = tcg_temp_new_i32();
2305          tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2306          tcg_gen_andi_i32(level, level, 3);
2307      }
2308      want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2309  
2310      gen_helper_probe(dest, tcg_env, addr, level, want);
2311  
2312      save_gpr(ctx, a->t, dest);
2313      return nullify_end(ctx);
2314  }
2315  
2316  static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2317  {
2318      if (ctx->is_pa20) {
2319          return false;
2320      }
2321      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2322  #ifndef CONFIG_USER_ONLY
2323      TCGv_i64 addr;
2324      TCGv_i64 ofs, reg;
2325  
2326      nullify_over(ctx);
2327  
2328      form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2329      reg = load_gpr(ctx, a->r);
2330      if (a->addr) {
2331          gen_helper_itlba_pa11(tcg_env, addr, reg);
2332      } else {
2333          gen_helper_itlbp_pa11(tcg_env, addr, reg);
2334      }
2335  
2336      /* Exit TB for TLB change if mmu is enabled.  */
2337      if (ctx->tb_flags & PSW_C) {
2338          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2339      }
2340      return nullify_end(ctx);
2341  #endif
2342  }
2343  
2344  static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2345  {
2346      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2347  #ifndef CONFIG_USER_ONLY
2348      TCGv_i64 addr;
2349      TCGv_i64 ofs;
2350  
2351      nullify_over(ctx);
2352  
2353      form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2354  
2355      /*
2356       * Page align now, rather than later, so that we can add in the
2357       * pa2.0 page_size field from the low 4 bits of GR[b].
2358       */
2359      tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2360      if (ctx->is_pa20) {
2361          tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2362      }
2363  
2364      if (local) {
2365          gen_helper_ptlb_l(tcg_env, addr);
2366      } else {
2367          gen_helper_ptlb(tcg_env, addr);
2368      }
2369  
2370      if (a->m) {
2371          save_gpr(ctx, a->b, ofs);
2372      }
2373  
2374      /* Exit TB for TLB change if mmu is enabled.  */
2375      if (ctx->tb_flags & PSW_C) {
2376          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2377      }
2378      return nullify_end(ctx);
2379  #endif
2380  }
2381  
2382  static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2383  {
2384      return do_pxtlb(ctx, a, false);
2385  }
2386  
2387  static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2388  {
2389      return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2390  }
2391  
2392  static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2393  {
2394      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2395  #ifndef CONFIG_USER_ONLY
2396      nullify_over(ctx);
2397  
2398      trans_nop_addrx(ctx, a);
2399      gen_helper_ptlbe(tcg_env);
2400  
2401      /* Exit TB for TLB change if mmu is enabled.  */
2402      if (ctx->tb_flags & PSW_C) {
2403          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2404      }
2405      return nullify_end(ctx);
2406  #endif
2407  }
2408  
2409  /*
2410   * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2411   * See
2412   *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2413   *     page 13-9 (195/206)
2414   */
2415  static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2416  {
2417      if (ctx->is_pa20) {
2418          return false;
2419      }
2420      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2421  #ifndef CONFIG_USER_ONLY
2422      TCGv_i64 addr, atl, stl;
2423      TCGv_i64 reg;
2424  
2425      nullify_over(ctx);
2426  
2427      /*
2428       * FIXME:
2429       *  if (not (pcxl or pcxl2))
2430       *    return gen_illegal(ctx);
2431       */
2432  
2433      atl = tcg_temp_new_i64();
2434      stl = tcg_temp_new_i64();
2435      addr = tcg_temp_new_i64();
2436  
2437      tcg_gen_ld32u_i64(stl, tcg_env,
2438                        a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2439                        : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2440      tcg_gen_ld32u_i64(atl, tcg_env,
2441                        a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2442                        : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2443      tcg_gen_shli_i64(stl, stl, 32);
2444      tcg_gen_or_i64(addr, atl, stl);
2445  
2446      reg = load_gpr(ctx, a->r);
2447      if (a->addr) {
2448          gen_helper_itlba_pa11(tcg_env, addr, reg);
2449      } else {
2450          gen_helper_itlbp_pa11(tcg_env, addr, reg);
2451      }
2452  
2453      /* Exit TB for TLB change if mmu is enabled.  */
2454      if (ctx->tb_flags & PSW_C) {
2455          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2456      }
2457      return nullify_end(ctx);
2458  #endif
2459  }
2460  
2461  static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2462  {
2463      if (!ctx->is_pa20) {
2464          return false;
2465      }
2466      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467  #ifndef CONFIG_USER_ONLY
2468      nullify_over(ctx);
2469      {
2470          TCGv_i64 src1 = load_gpr(ctx, a->r1);
2471          TCGv_i64 src2 = load_gpr(ctx, a->r2);
2472  
2473          if (a->data) {
2474              gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2475          } else {
2476              gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2477          }
2478      }
2479      /* Exit TB for TLB change if mmu is enabled.  */
2480      if (ctx->tb_flags & PSW_C) {
2481          ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2482      }
2483      return nullify_end(ctx);
2484  #endif
2485  }
2486  
2487  static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2488  {
2489      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2490  #ifndef CONFIG_USER_ONLY
2491      TCGv_i64 vaddr;
2492      TCGv_i64 ofs, paddr;
2493  
2494      nullify_over(ctx);
2495  
2496      form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2497  
2498      paddr = tcg_temp_new_i64();
2499      gen_helper_lpa(paddr, tcg_env, vaddr);
2500  
2501      /* Note that physical address result overrides base modification.  */
2502      if (a->m) {
2503          save_gpr(ctx, a->b, ofs);
2504      }
2505      save_gpr(ctx, a->t, paddr);
2506  
2507      return nullify_end(ctx);
2508  #endif
2509  }
2510  
2511  static bool trans_lci(DisasContext *ctx, arg_lci *a)
2512  {
2513      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2514  
2515      /* The Coherence Index is an implementation-defined function of the
2516         physical address.  Two addresses with the same CI have a coherent
2517         view of the cache.  Our implementation is to return 0 for all,
2518         since the entire address space is coherent.  */
2519      save_gpr(ctx, a->t, ctx->zero);
2520  
2521      cond_free(&ctx->null_cond);
2522      return true;
2523  }
2524  
2525  static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2526  {
2527      return do_add_reg(ctx, a, false, false, false, false);
2528  }
2529  
2530  static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2531  {
2532      return do_add_reg(ctx, a, true, false, false, false);
2533  }
2534  
2535  static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2536  {
2537      return do_add_reg(ctx, a, false, true, false, false);
2538  }
2539  
2540  static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2541  {
2542      return do_add_reg(ctx, a, false, false, false, true);
2543  }
2544  
2545  static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2546  {
2547      return do_add_reg(ctx, a, false, true, false, true);
2548  }
2549  
2550  static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2551  {
2552      return do_sub_reg(ctx, a, false, false, false);
2553  }
2554  
2555  static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2556  {
2557      return do_sub_reg(ctx, a, true, false, false);
2558  }
2559  
2560  static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2561  {
2562      return do_sub_reg(ctx, a, false, false, true);
2563  }
2564  
2565  static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2566  {
2567      return do_sub_reg(ctx, a, true, false, true);
2568  }
2569  
2570  static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2571  {
2572      return do_sub_reg(ctx, a, false, true, false);
2573  }
2574  
2575  static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2576  {
2577      return do_sub_reg(ctx, a, true, true, false);
2578  }
2579  
2580  static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2581  {
2582      return do_log_reg(ctx, a, tcg_gen_andc_i64);
2583  }
2584  
2585  static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2586  {
2587      return do_log_reg(ctx, a, tcg_gen_and_i64);
2588  }
2589  
2590  static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2591  {
2592      if (a->cf == 0) {
2593          unsigned r2 = a->r2;
2594          unsigned r1 = a->r1;
2595          unsigned rt = a->t;
2596  
2597          if (rt == 0) { /* NOP */
2598              cond_free(&ctx->null_cond);
2599              return true;
2600          }
2601          if (r2 == 0) { /* COPY */
2602              if (r1 == 0) {
2603                  TCGv_i64 dest = dest_gpr(ctx, rt);
2604                  tcg_gen_movi_i64(dest, 0);
2605                  save_gpr(ctx, rt, dest);
2606              } else {
2607                  save_gpr(ctx, rt, cpu_gr[r1]);
2608              }
2609              cond_free(&ctx->null_cond);
2610              return true;
2611          }
2612  #ifndef CONFIG_USER_ONLY
2613          /* These are QEMU extensions and are nops in the real architecture:
2614           *
2615           * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2616           * or %r31,%r31,%r31 -- death loop; offline cpu
2617           *                      currently implemented as idle.
2618           */
2619          if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2620              /* No need to check for supervisor, as userland can only pause
2621                 until the next timer interrupt.  */
2622              nullify_over(ctx);
2623  
2624              /* Advance the instruction queue.  */
2625              copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2626              copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2627              nullify_set(ctx, 0);
2628  
2629              /* Tell the qemu main loop to halt until this cpu has work.  */
2630              tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2631                             offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2632              gen_excp_1(EXCP_HALTED);
2633              ctx->base.is_jmp = DISAS_NORETURN;
2634  
2635              return nullify_end(ctx);
2636          }
2637  #endif
2638      }
2639      return do_log_reg(ctx, a, tcg_gen_or_i64);
2640  }
2641  
2642  static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2643  {
2644      return do_log_reg(ctx, a, tcg_gen_xor_i64);
2645  }
2646  
2647  static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2648  {
2649      TCGv_i64 tcg_r1, tcg_r2;
2650  
2651      if (a->cf) {
2652          nullify_over(ctx);
2653      }
2654      tcg_r1 = load_gpr(ctx, a->r1);
2655      tcg_r2 = load_gpr(ctx, a->r2);
2656      do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2657      return nullify_end(ctx);
2658  }
2659  
2660  static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2661  {
2662      TCGv_i64 tcg_r1, tcg_r2;
2663  
2664      if (a->cf) {
2665          nullify_over(ctx);
2666      }
2667      tcg_r1 = load_gpr(ctx, a->r1);
2668      tcg_r2 = load_gpr(ctx, a->r2);
2669      do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, false, tcg_gen_xor_i64);
2670      return nullify_end(ctx);
2671  }
2672  
2673  static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2674  {
2675      TCGv_i64 tcg_r1, tcg_r2, tmp;
2676  
2677      if (a->cf) {
2678          nullify_over(ctx);
2679      }
2680      tcg_r1 = load_gpr(ctx, a->r1);
2681      tcg_r2 = load_gpr(ctx, a->r2);
2682      tmp = tcg_temp_new_i64();
2683      tcg_gen_not_i64(tmp, tcg_r2);
2684      do_unit(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, tcg_gen_add_i64);
2685      return nullify_end(ctx);
2686  }
2687  
2688  static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2689  {
2690      return do_uaddcm(ctx, a, false);
2691  }
2692  
2693  static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2694  {
2695      return do_uaddcm(ctx, a, true);
2696  }
2697  
2698  static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2699  {
2700      TCGv_i64 tmp;
2701  
2702      nullify_over(ctx);
2703  
2704      tmp = tcg_temp_new_i64();
2705      tcg_gen_shri_i64(tmp, cpu_psw_cb, 3);
2706      if (!is_i) {
2707          tcg_gen_not_i64(tmp, tmp);
2708      }
2709      tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2710      tcg_gen_muli_i64(tmp, tmp, 6);
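      /*
       * A sketch of the intent, assuming the usual BCD correction: the
       * shift and 0x1111... mask isolate one carry/borrow indicator per
       * 4-bit digit from PSW[CB] (inverted for DCOR, direct for DCOR,I),
       * and multiplying by 6 turns each selected bit into a per-digit
       * 0x6 decimal adjustment.
       */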
2711      do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, a->d, false,
2712              is_i ? tcg_gen_add_i64 : tcg_gen_sub_i64);
2713      return nullify_end(ctx);
2714  }
2715  
2716  static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2717  {
2718      return do_dcor(ctx, a, false);
2719  }
2720  
2721  static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2722  {
2723      return do_dcor(ctx, a, true);
2724  }
2725  
2726  static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2727  {
2728      TCGv_i64 dest, add1, add2, addc, in1, in2;
2729      TCGv_i64 cout;
2730  
2731      nullify_over(ctx);
2732  
2733      in1 = load_gpr(ctx, a->r1);
2734      in2 = load_gpr(ctx, a->r2);
2735  
2736      add1 = tcg_temp_new_i64();
2737      add2 = tcg_temp_new_i64();
2738      addc = tcg_temp_new_i64();
2739      dest = tcg_temp_new_i64();
2740  
2741      /* Form R1 << 1 | PSW[CB]{8}.  */
2742      tcg_gen_add_i64(add1, in1, in1);
2743      tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2744  
2745      /*
2746       * Add or subtract R2, depending on PSW[V].  Proper computation of
2747       * carry requires that we subtract via + ~R2 + 1, as described in
2748       * the manual.  By extracting and masking V, we can produce the
2749       * proper inputs to the addition without movcond.
2750       */
2751      tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
2752      tcg_gen_xor_i64(add2, in2, addc);
2753      tcg_gen_andi_i64(addc, addc, 1);
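      /*
       * At this point, if PSW[V] was set: addc was -1 at the xor, so
       * add2 = ~in2, and the masked addc == 1 supplies the +1 that
       * completes add1 + ~in2 + 1 == add1 - in2.  If PSW[V] was clear,
       * addc == 0 and add2 == in2 unchanged.
       */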
2754  
2755      tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
2756      tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
2757                       addc, ctx->zero);
2758  
2759      /* Write back the result register.  */
2760      save_gpr(ctx, a->t, dest);
2761  
2762      /* Write back PSW[CB].  */
2763      tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
2764      tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
2765  
2766      /* Write back PSW[V] for the division step.  */
2767      cout = get_psw_carry(ctx, false);
2768      tcg_gen_neg_i64(cpu_psw_v, cout);
2769      tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
2770  
2771      /* Install the new nullification.  */
2772      if (a->cf) {
2773          TCGv_i64 sv = NULL;
2774          if (cond_need_sv(a->cf >> 1)) {
2775              /* ??? The lshift is supposed to contribute to overflow.  */
2776              sv = do_add_sv(ctx, dest, add1, add2);
2777          }
2778          ctx->null_cond = do_cond(ctx, a->cf, false, dest, cout, sv);
2779      }
2780  
2781      return nullify_end(ctx);
2782  }
2783  
2784  static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
2785  {
2786      return do_add_imm(ctx, a, false, false);
2787  }
2788  
2789  static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2790  {
2791      return do_add_imm(ctx, a, true, false);
2792  }
2793  
2794  static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
2795  {
2796      return do_add_imm(ctx, a, false, true);
2797  }
2798  
2799  static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2800  {
2801      return do_add_imm(ctx, a, true, true);
2802  }
2803  
2804  static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2805  {
2806      return do_sub_imm(ctx, a, false);
2807  }
2808  
2809  static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2810  {
2811      return do_sub_imm(ctx, a, true);
2812  }
2813  
2814  static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
2815  {
2816      TCGv_i64 tcg_im, tcg_r2;
2817  
2818      if (a->cf) {
2819          nullify_over(ctx);
2820      }
2821  
2822      tcg_im = tcg_constant_i64(a->i);
2823      tcg_r2 = load_gpr(ctx, a->r);
2824      do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
2825  
2826      return nullify_end(ctx);
2827  }
2828  
2829  static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
2830                            void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
2831  {
2832      TCGv_i64 r1, r2, dest;
2833  
2834      if (!ctx->is_pa20) {
2835          return false;
2836      }
2837  
2838      nullify_over(ctx);
2839  
2840      r1 = load_gpr(ctx, a->r1);
2841      r2 = load_gpr(ctx, a->r2);
2842      dest = dest_gpr(ctx, a->t);
2843  
2844      fn(dest, r1, r2);
2845      save_gpr(ctx, a->t, dest);
2846  
2847      return nullify_end(ctx);
2848  }
2849  
2850  static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
2851                               void (*fn)(TCGv_i64, TCGv_i64, int64_t))
2852  {
2853      TCGv_i64 r, dest;
2854  
2855      if (!ctx->is_pa20) {
2856          return false;
2857      }
2858  
2859      nullify_over(ctx);
2860  
2861      r = load_gpr(ctx, a->r);
2862      dest = dest_gpr(ctx, a->t);
2863  
2864      fn(dest, r, a->i);
2865      save_gpr(ctx, a->t, dest);
2866  
2867      return nullify_end(ctx);
2868  }
2869  
2870  static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
2871                                  void (*fn)(TCGv_i64, TCGv_i64,
2872                                             TCGv_i64, TCGv_i32))
2873  {
2874      TCGv_i64 r1, r2, dest;
2875  
2876      if (!ctx->is_pa20) {
2877          return false;
2878      }
2879  
2880      nullify_over(ctx);
2881  
2882      r1 = load_gpr(ctx, a->r1);
2883      r2 = load_gpr(ctx, a->r2);
2884      dest = dest_gpr(ctx, a->t);
2885  
2886      fn(dest, r1, r2, tcg_constant_i32(a->sh));
2887      save_gpr(ctx, a->t, dest);
2888  
2889      return nullify_end(ctx);
2890  }
2891  
2892  static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
2893  {
2894      return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
2895  }
2896  
2897  static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
2898  {
2899      return do_multimedia(ctx, a, gen_helper_hadd_ss);
2900  }
2901  
2902  static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
2903  {
2904      return do_multimedia(ctx, a, gen_helper_hadd_us);
2905  }
2906  
2907  static bool trans_havg(DisasContext *ctx, arg_rrr *a)
2908  {
2909      return do_multimedia(ctx, a, gen_helper_havg);
2910  }
2911  
2912  static bool trans_hshl(DisasContext *ctx, arg_rri *a)
2913  {
2914      return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
2915  }
2916  
2917  static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
2918  {
2919      return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
2920  }
2921  
2922  static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
2923  {
2924      return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
2925  }
2926  
2927  static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
2928  {
2929      return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
2930  }
2931  
2932  static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
2933  {
2934      return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
2935  }
2936  
2937  static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
2938  {
2939      return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
2940  }
2941  
2942  static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
2943  {
2944      return do_multimedia(ctx, a, gen_helper_hsub_ss);
2945  }
2946  
2947  static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
2948  {
2949      return do_multimedia(ctx, a, gen_helper_hsub_us);
2950  }
2951  
2952  static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2953  {
2954      uint64_t mask = 0xffff0000ffff0000ull;
2955      TCGv_i64 tmp = tcg_temp_new_i64();
2956  
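      /*
       * Resulting layout, halfwords numbered 0..3 from the MSB
       * (derived from the masks below):
       *
       *     dst = { r1.h0, r2.h0, r1.h2, r2.h2 }
       */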
2957      tcg_gen_andi_i64(tmp, r2, mask);
2958      tcg_gen_andi_i64(dst, r1, mask);
2959      tcg_gen_shri_i64(tmp, tmp, 16);
2960      tcg_gen_or_i64(dst, dst, tmp);
2961  }
2962  
2963  static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
2964  {
2965      return do_multimedia(ctx, a, gen_mixh_l);
2966  }
2967  
2968  static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2969  {
2970      uint64_t mask = 0x0000ffff0000ffffull;
2971      TCGv_i64 tmp = tcg_temp_new_i64();
2972  
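      /*
       * Resulting layout, halfwords numbered 0..3 from the MSB:
       *
       *     dst = { r1.h1, r2.h1, r1.h3, r2.h3 }
       */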
2973      tcg_gen_andi_i64(tmp, r1, mask);
2974      tcg_gen_andi_i64(dst, r2, mask);
2975      tcg_gen_shli_i64(tmp, tmp, 16);
2976      tcg_gen_or_i64(dst, dst, tmp);
2977  }
2978  
2979  static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
2980  {
2981      return do_multimedia(ctx, a, gen_mixh_r);
2982  }
2983  
2984  static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2985  {
2986      TCGv_i64 tmp = tcg_temp_new_i64();
2987  
2988      tcg_gen_shri_i64(tmp, r2, 32);
2989      tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
2990  }
2991  
2992  static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
2993  {
2994      return do_multimedia(ctx, a, gen_mixw_l);
2995  }
2996  
2997  static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
2998  {
2999      tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3000  }
3001  
3002  static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3003  {
3004      return do_multimedia(ctx, a, gen_mixw_r);
3005  }
3006  
3007  static bool trans_permh(DisasContext *ctx, arg_permh *a)
3008  {
3009      TCGv_i64 r, t0, t1, t2, t3;
3010  
3011      if (!ctx->is_pa20) {
3012          return false;
3013      }
3014  
3015      nullify_over(ctx);
3016  
3017      r = load_gpr(ctx, a->r1);
3018      t0 = tcg_temp_new_i64();
3019      t1 = tcg_temp_new_i64();
3020      t2 = tcg_temp_new_i64();
3021      t3 = tcg_temp_new_i64();
3022  
3023      tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3024      tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3025      tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3026      tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3027  
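      /*
       * Reassemble as { t0, t1, t2, t3 } from the most-significant
       * halfword down; e.g. a hypothetical PERMH with c0..c3 = 3,2,1,0
       * reverses the four halfwords of r.
       */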
3028      tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3029      tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3030      tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3031  
3032      save_gpr(ctx, a->t, t0);
3033      return nullify_end(ctx);
3034  }
3035  
3036  static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3037  {
3038      if (ctx->is_pa20) {
3039         /*
3040          * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3041          * Any base modification still occurs.
3042          */
3043          if (a->t == 0) {
3044              return trans_nop_addrx(ctx, a);
3045          }
3046      } else if (a->size > MO_32) {
3047          return gen_illegal(ctx);
3048      }
3049      return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3050                     a->disp, a->sp, a->m, a->size | MO_TE);
3051  }
3052  
3053  static bool trans_st(DisasContext *ctx, arg_ldst *a)
3054  {
3055      assert(a->x == 0 && a->scale == 0);
3056      if (!ctx->is_pa20 && a->size > MO_32) {
3057          return gen_illegal(ctx);
3058      }
3059      return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3060  }
3061  
3062  static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3063  {
3064      MemOp mop = MO_TE | MO_ALIGN | a->size;
3065      TCGv_i64 dest, ofs;
3066      TCGv_i64 addr;
3067  
3068      if (!ctx->is_pa20 && a->size > MO_32) {
3069          return gen_illegal(ctx);
3070      }
3071  
3072      nullify_over(ctx);
3073  
3074      if (a->m) {
3075          /* Base register modification.  Make sure if RT == RB,
3076             we see the result of the load.  */
3077          dest = tcg_temp_new_i64();
3078      } else {
3079          dest = dest_gpr(ctx, a->t);
3080      }
3081  
3082      form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
3083               a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3084  
3085      /*
3086       * For hppa1.1, LDCW is undefined unless aligned mod 16.
3087       * However, actual hardware succeeds when aligned mod 4.
3088       * Detect this case and log a GUEST_ERROR.
3089       *
3090       * TODO: HPPA64 relaxes the over-alignment requirement
3091       * with the ,co completer.
3092       */
3093      gen_helper_ldc_check(addr);
3094  
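      /*
       * LDCW is load-and-clear: the atomic exchange with ctx->zero
       * returns the old memory value in DEST and leaves zero behind,
       * which is the primitive PA-RISC spinlocks are built from.
       */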
3095      tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3096  
3097      if (a->m) {
3098          save_gpr(ctx, a->b, ofs);
3099      }
3100      save_gpr(ctx, a->t, dest);
3101  
3102      return nullify_end(ctx);
3103  }
3104  
3105  static bool trans_stby(DisasContext *ctx, arg_stby *a)
3106  {
3107      TCGv_i64 ofs, val;
3108      TCGv_i64 addr;
3109  
3110      nullify_over(ctx);
3111  
3112      form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3113               MMU_DISABLED(ctx));
3114      val = load_gpr(ctx, a->r);
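      /*
       * A hedged note on the helpers: STBY,B covers the bytes of VAL
       * from ADDR to the end of its word, STBY,E the leading bytes of
       * the word, so a begin/end pair brackets an unaligned block
       * store; the _parallel variants are the CF_PARALLEL versions.
       */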
3115      if (a->a) {
3116          if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3117              gen_helper_stby_e_parallel(tcg_env, addr, val);
3118          } else {
3119              gen_helper_stby_e(tcg_env, addr, val);
3120          }
3121      } else {
3122          if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3123              gen_helper_stby_b_parallel(tcg_env, addr, val);
3124          } else {
3125              gen_helper_stby_b(tcg_env, addr, val);
3126          }
3127      }
3128      if (a->m) {
3129          tcg_gen_andi_i64(ofs, ofs, ~3);
3130          save_gpr(ctx, a->b, ofs);
3131      }
3132  
3133      return nullify_end(ctx);
3134  }
3135  
3136  static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3137  {
3138      TCGv_i64 ofs, val;
3139      TCGv_i64 addr;
3140  
3141      if (!ctx->is_pa20) {
3142          return false;
3143      }
3144      nullify_over(ctx);
3145  
3146      form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3147               MMU_DISABLED(ctx));
3148      val = load_gpr(ctx, a->r);
3149      if (a->a) {
3150          if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3151              gen_helper_stdby_e_parallel(tcg_env, addr, val);
3152          } else {
3153              gen_helper_stdby_e(tcg_env, addr, val);
3154          }
3155      } else {
3156          if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3157              gen_helper_stdby_b_parallel(tcg_env, addr, val);
3158          } else {
3159              gen_helper_stdby_b(tcg_env, addr, val);
3160          }
3161      }
3162      if (a->m) {
3163          tcg_gen_andi_i64(ofs, ofs, ~7);
3164          save_gpr(ctx, a->b, ofs);
3165      }
3166  
3167      return nullify_end(ctx);
3168  }
3169  
3170  static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3171  {
3172      int hold_mmu_idx = ctx->mmu_idx;
3173  
3174      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3175      ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3176      trans_ld(ctx, a);
3177      ctx->mmu_idx = hold_mmu_idx;
3178      return true;
3179  }
3180  
3181  static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3182  {
3183      int hold_mmu_idx = ctx->mmu_idx;
3184  
3185      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3186      ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3187      trans_st(ctx, a);
3188      ctx->mmu_idx = hold_mmu_idx;
3189      return true;
3190  }
3191  
3192  static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3193  {
3194      TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3195  
3196      tcg_gen_movi_i64(tcg_rt, a->i);
3197      save_gpr(ctx, a->t, tcg_rt);
3198      cond_free(&ctx->null_cond);
3199      return true;
3200  }
3201  
3202  static bool trans_addil(DisasContext *ctx, arg_addil *a)
3203  {
3204      TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3205      TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3206  
3207      tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3208      save_gpr(ctx, 1, tcg_r1);
3209      cond_free(&ctx->null_cond);
3210      return true;
3211  }
3212  
3213  static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3214  {
3215      TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3216  
3217      /* Special case rb == 0, for the LDI pseudo-op.
3218         The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
3219      if (a->b == 0) {
3220          tcg_gen_movi_i64(tcg_rt, a->i);
3221      } else {
3222          tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3223      }
3224      save_gpr(ctx, a->t, tcg_rt);
3225      cond_free(&ctx->null_cond);
3226      return true;
3227  }
3228  
3229  static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3230                      unsigned c, unsigned f, bool d, unsigned n, int disp)
3231  {
3232      TCGv_i64 dest, in2, sv;
3233      DisasCond cond;
3234  
3235      in2 = load_gpr(ctx, r);
3236      dest = tcg_temp_new_i64();
3237  
3238      tcg_gen_sub_i64(dest, in1, in2);
3239  
3240      sv = NULL;
3241      if (cond_need_sv(c)) {
3242          sv = do_sub_sv(ctx, dest, in1, in2);
3243      }
3244  
3245      cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3246      return do_cbranch(ctx, disp, n, &cond);
3247  }
3248  
3249  static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3250  {
3251      if (!ctx->is_pa20 && a->d) {
3252          return false;
3253      }
3254      nullify_over(ctx);
3255      return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3256                     a->c, a->f, a->d, a->n, a->disp);
3257  }
3258  
3259  static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3260  {
3261      if (!ctx->is_pa20 && a->d) {
3262          return false;
3263      }
3264      nullify_over(ctx);
3265      return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3266                     a->c, a->f, a->d, a->n, a->disp);
3267  }
3268  
3269  static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3270                      unsigned c, unsigned f, unsigned n, int disp)
3271  {
3272      TCGv_i64 dest, in2, sv, cb_cond;
3273      DisasCond cond;
3274      bool d = false;
3275  
3276      /*
3277       * For hppa64, the ADDB conditions change with PSW.W,
3278       * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3279       */
3280      if (ctx->tb_flags & PSW_W) {
3281          d = c >= 5;
3282          if (d) {
3283              c &= 3;
3284          }
3285      }
3286  
3287      in2 = load_gpr(ctx, r);
3288      dest = tcg_temp_new_i64();
3289      sv = NULL;
3290      cb_cond = NULL;
3291  
3292      if (cond_need_cb(c)) {
3293          TCGv_i64 cb = tcg_temp_new_i64();
3294          TCGv_i64 cb_msb = tcg_temp_new_i64();
3295  
3296          tcg_gen_movi_i64(cb_msb, 0);
3297          tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3298          tcg_gen_xor_i64(cb, in1, in2);
3299          tcg_gen_xor_i64(cb, cb, dest);
3300          cb_cond = get_carry(ctx, d, cb, cb_msb);
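          /*
           * in1 ^ in2 ^ dest recovers the carry-in to every bit of the
           * sum, so get_carry() can select either the word or the
           * double-word carry from cb/cb_msb as D requires.
           */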
3301      } else {
3302          tcg_gen_add_i64(dest, in1, in2);
3303      }
3304      if (cond_need_sv(c)) {
3305          sv = do_add_sv(ctx, dest, in1, in2);
3306      }
3307  
3308      cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3309      save_gpr(ctx, r, dest);
3310      return do_cbranch(ctx, disp, n, &cond);
3311  }
3312  
3313  static bool trans_addb(DisasContext *ctx, arg_addb *a)
3314  {
3315      nullify_over(ctx);
3316      return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3317  }
3318  
3319  static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3320  {
3321      nullify_over(ctx);
3322      return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3323  }
3324  
3325  static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3326  {
3327      TCGv_i64 tmp, tcg_r;
3328      DisasCond cond;
3329  
3330      nullify_over(ctx);
3331  
3332      tmp = tcg_temp_new_i64();
3333      tcg_r = load_gpr(ctx, a->r);
3334      if (cond_need_ext(ctx, a->d)) {
3335          /* Force shift into [32,63] */
3336          tcg_gen_ori_i64(tmp, cpu_sar, 32);
3337          tcg_gen_shl_i64(tmp, tcg_r, tmp);
3338      } else {
3339          tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3340      }
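      /*
       * The chosen bit now sits in the MSB, so branch-on-bit reduces
       * to a sign test; a->c selects GE (bit clear) vs LT (bit set).
       */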
3341  
3342      cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3343      return do_cbranch(ctx, a->disp, a->n, &cond);
3344  }
3345  
3346  static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3347  {
3348      TCGv_i64 tmp, tcg_r;
3349      DisasCond cond;
3350      int p;
3351  
3352      nullify_over(ctx);
3353  
3354      tmp = tcg_temp_new_i64();
3355      tcg_r = load_gpr(ctx, a->r);
3356      p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
3357      tcg_gen_shli_i64(tmp, tcg_r, p);
3358  
3359      cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
3360      return do_cbranch(ctx, a->disp, a->n, &cond);
3361  }
3362  
3363  static bool trans_movb(DisasContext *ctx, arg_movb *a)
3364  {
3365      TCGv_i64 dest;
3366      DisasCond cond;
3367  
3368      nullify_over(ctx);
3369  
3370      dest = dest_gpr(ctx, a->r2);
3371      if (a->r1 == 0) {
3372          tcg_gen_movi_i64(dest, 0);
3373      } else {
3374          tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3375      }
3376  
3377      /* All MOVB conditions are 32-bit. */
3378      cond = do_sed_cond(ctx, a->c, false, dest);
3379      return do_cbranch(ctx, a->disp, a->n, &cond);
3380  }
3381  
3382  static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3383  {
3384      TCGv_i64 dest;
3385      DisasCond cond;
3386  
3387      nullify_over(ctx);
3388  
3389      dest = dest_gpr(ctx, a->r);
3390      tcg_gen_movi_i64(dest, a->i);
3391  
3392      /* All MOVBI conditions are 32-bit. */
3393      cond = do_sed_cond(ctx, a->c, false, dest);
3394      return do_cbranch(ctx, a->disp, a->n, &cond);
3395  }
3396  
3397  static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3398  {
3399      TCGv_i64 dest, src2;
3400  
3401      if (!ctx->is_pa20 && a->d) {
3402          return false;
3403      }
3404      if (a->c) {
3405          nullify_over(ctx);
3406      }
3407  
3408      dest = dest_gpr(ctx, a->t);
3409      src2 = load_gpr(ctx, a->r2);
3410      if (a->r1 == 0) {
3411          if (a->d) {
3412              tcg_gen_shr_i64(dest, src2, cpu_sar);
3413          } else {
3414              TCGv_i64 tmp = tcg_temp_new_i64();
3415  
3416              tcg_gen_ext32u_i64(dest, src2);
3417              tcg_gen_andi_i64(tmp, cpu_sar, 31);
3418              tcg_gen_shr_i64(dest, dest, tmp);
3419          }
3420      } else if (a->r1 == a->r2) {
3421          if (a->d) {
3422              tcg_gen_rotr_i64(dest, src2, cpu_sar);
3423          } else {
3424              TCGv_i32 t32 = tcg_temp_new_i32();
3425              TCGv_i32 s32 = tcg_temp_new_i32();
3426  
3427              tcg_gen_extrl_i64_i32(t32, src2);
3428              tcg_gen_extrl_i64_i32(s32, cpu_sar);
3429              tcg_gen_andi_i32(s32, s32, 31);
3430              tcg_gen_rotr_i32(t32, t32, s32);
3431              tcg_gen_extu_i32_i64(dest, t32);
3432          }
3433      } else {
3434          TCGv_i64 src1 = load_gpr(ctx, a->r1);
3435  
3436          if (a->d) {
3437              TCGv_i64 t = tcg_temp_new_i64();
3438              TCGv_i64 n = tcg_temp_new_i64();
3439  
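                  /*
                   * Compute src1 << (64 - sar) as << (63 - sar) then
                   * << 1, so a zero shift amount never produces an
                   * undefined 64-bit shift count.
                   */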
3440              tcg_gen_xori_i64(n, cpu_sar, 63);
3441              tcg_gen_shl_i64(t, src1, n);
3442              tcg_gen_shli_i64(t, t, 1);
3443              tcg_gen_shr_i64(dest, src2, cpu_sar);
3444              tcg_gen_or_i64(dest, dest, t);
3445          } else {
3446              TCGv_i64 t = tcg_temp_new_i64();
3447              TCGv_i64 s = tcg_temp_new_i64();
3448  
3449              tcg_gen_concat32_i64(t, src2, src1);
3450              tcg_gen_andi_i64(s, cpu_sar, 31);
3451              tcg_gen_shr_i64(dest, t, s);
3452          }
3453      }
3454      save_gpr(ctx, a->t, dest);
3455  
3456      /* Install the new nullification.  */
3457      cond_free(&ctx->null_cond);
3458      if (a->c) {
3459          ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3460      }
3461      return nullify_end(ctx);
3462  }
3463  
3464  static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3465  {
3466      unsigned width, sa;
3467      TCGv_i64 dest, t2;
3468  
3469      if (!ctx->is_pa20 && a->d) {
3470          return false;
3471      }
3472      if (a->c) {
3473          nullify_over(ctx);
3474      }
3475  
3476      width = a->d ? 64 : 32;
3477      sa = width - 1 - a->cpos;
3478  
3479      dest = dest_gpr(ctx, a->t);
3480      t2 = load_gpr(ctx, a->r2);
3481      if (a->r1 == 0) {
3482          tcg_gen_extract_i64(dest, t2, sa, width - sa);
3483      } else if (width == TARGET_LONG_BITS) {
3484          tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3485      } else {
3486          assert(!a->d);
3487          if (a->r1 == a->r2) {
3488              TCGv_i32 t32 = tcg_temp_new_i32();
3489              tcg_gen_extrl_i64_i32(t32, t2);
3490              tcg_gen_rotri_i32(t32, t32, sa);
3491              tcg_gen_extu_i32_i64(dest, t32);
3492          } else {
3493              tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3494              tcg_gen_extract_i64(dest, dest, sa, 32);
3495          }
3496      }
3497      save_gpr(ctx, a->t, dest);
3498  
3499      /* Install the new nullification.  */
3500      cond_free(&ctx->null_cond);
3501      if (a->c) {
3502          ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
3503      }
3504      return nullify_end(ctx);
3505  }
3506  
3507  static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3508  {
3509      unsigned widthm1 = a->d ? 63 : 31;
3510      TCGv_i64 dest, src, tmp;
3511  
3512      if (!ctx->is_pa20 && a->d) {
3513          return false;
3514      }
3515      if (a->c) {
3516          nullify_over(ctx);
3517      }
3518  
3519      dest = dest_gpr(ctx, a->t);
3520      src = load_gpr(ctx, a->r);
3521      tmp = tcg_temp_new_i64();
3522  
3523      /* Recall that SAR uses big-endian bit numbering.  */
3524      tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
3525      tcg_gen_xori_i64(tmp, tmp, widthm1);
3526  
3527      if (a->se) {
3528          if (!a->d) {
3529              tcg_gen_ext32s_i64(dest, src);
3530              src = dest;
3531          }
3532          tcg_gen_sar_i64(dest, src, tmp);
3533          tcg_gen_sextract_i64(dest, dest, 0, a->len);
3534      } else {
3535          if (!a->d) {
3536              tcg_gen_ext32u_i64(dest, src);
3537              src = dest;
3538          }
3539          tcg_gen_shr_i64(dest, src, tmp);
3540          tcg_gen_extract_i64(dest, dest, 0, a->len);
3541      }
3542      save_gpr(ctx, a->t, dest);
3543  
3544      /* Install the new nullification.  */
3545      cond_free(&ctx->null_cond);
3546      if (a->c) {
3547          ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3548      }
3549      return nullify_end(ctx);
3550  }
3551  
3552  static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
3553  {
3554      unsigned len, cpos, width;
3555      TCGv_i64 dest, src;
3556  
3557      if (!ctx->is_pa20 && a->d) {
3558          return false;
3559      }
3560      if (a->c) {
3561          nullify_over(ctx);
3562      }
3563  
3564      len = a->len;
3565      width = a->d ? 64 : 32;
3566      cpos = width - 1 - a->pos;
3567      if (cpos + len > width) {
3568          len = width - cpos;
3569      }
3570  
3571      dest = dest_gpr(ctx, a->t);
3572      src = load_gpr(ctx, a->r);
3573      if (a->se) {
3574          tcg_gen_sextract_i64(dest, src, cpos, len);
3575      } else {
3576          tcg_gen_extract_i64(dest, src, cpos, len);
3577      }
3578      save_gpr(ctx, a->t, dest);
3579  
3580      /* Install the new nullification.  */
3581      cond_free(&ctx->null_cond);
3582      if (a->c) {
3583          ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3584      }
3585      return nullify_end(ctx);
3586  }
3587  
3588  static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
3589  {
3590      unsigned len, width;
3591      uint64_t mask0, mask1;
3592      TCGv_i64 dest;
3593  
3594      if (!ctx->is_pa20 && a->d) {
3595          return false;
3596      }
3597      if (a->c) {
3598          nullify_over(ctx);
3599      }
3600  
3601      len = a->len;
3602      width = a->d ? 64 : 32;
3603      if (a->cpos + len > width) {
3604          len = width - a->cpos;
3605      }
3606  
3607      dest = dest_gpr(ctx, a->t);
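          /*
           * Precompute both forms of the deposit: mask0 has the immediate
           * field in place with zeros elsewhere, mask1 has it in place with
           * ones elsewhere, so (src & mask1) | mask0 deposits the field.
           */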
3608      mask0 = deposit64(0, a->cpos, len, a->i);
3609      mask1 = deposit64(-1, a->cpos, len, a->i);
3610  
3611      if (a->nz) {
3612          TCGv_i64 src = load_gpr(ctx, a->t);
3613          tcg_gen_andi_i64(dest, src, mask1);
3614          tcg_gen_ori_i64(dest, dest, mask0);
3615      } else {
3616          tcg_gen_movi_i64(dest, mask0);
3617      }
3618      save_gpr(ctx, a->t, dest);
3619  
3620      /* Install the new nullification.  */
3621      cond_free(&ctx->null_cond);
3622      if (a->c) {
3623          ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3624      }
3625      return nullify_end(ctx);
3626  }
3627  
3628  static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
3629  {
3630      unsigned rs = a->nz ? a->t : 0;
3631      unsigned len, width;
3632      TCGv_i64 dest, val;
3633  
3634      if (!ctx->is_pa20 && a->d) {
3635          return false;
3636      }
3637      if (a->c) {
3638          nullify_over(ctx);
3639      }
3640  
3641      len = a->len;
3642      width = a->d ? 64 : 32;
3643      if (a->cpos + len > width) {
3644          len = width - a->cpos;
3645      }
3646  
3647      dest = dest_gpr(ctx, a->t);
3648      val = load_gpr(ctx, a->r);
3649      if (rs == 0) {
3650          tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
3651      } else {
3652          tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
3653      }
3654      save_gpr(ctx, a->t, dest);
3655  
3656      /* Install the new nullification.  */
3657      cond_free(&ctx->null_cond);
3658      if (a->c) {
3659          ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3660      }
3661      return nullify_end(ctx);
3662  }
3663  
3664  static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
3665                         bool d, bool nz, unsigned len, TCGv_i64 val)
3666  {
3667      unsigned rs = nz ? rt : 0;
3668      unsigned widthm1 = d ? 63 : 31;
3669      TCGv_i64 mask, tmp, shift, dest;
3670      uint64_t msb = 1ULL << (len - 1);
3671  
3672      dest = dest_gpr(ctx, rt);
3673      shift = tcg_temp_new_i64();
3674      tmp = tcg_temp_new_i64();
3675  
3676      /* Convert big-endian bit numbering in SAR to left-shift.  */
3677      tcg_gen_andi_i64(shift, cpu_sar, widthm1);
3678      tcg_gen_xori_i64(shift, shift, widthm1);
3679  
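          /* msb + (msb - 1) == (1 << len) - 1, a mask of the low LEN bits. */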
3680      mask = tcg_temp_new_i64();
3681      tcg_gen_movi_i64(mask, msb + (msb - 1));
3682      tcg_gen_and_i64(tmp, val, mask);
3683      if (rs) {
3684          tcg_gen_shl_i64(mask, mask, shift);
3685          tcg_gen_shl_i64(tmp, tmp, shift);
3686          tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
3687          tcg_gen_or_i64(dest, dest, tmp);
3688      } else {
3689          tcg_gen_shl_i64(dest, tmp, shift);
3690      }
3691      save_gpr(ctx, rt, dest);
3692  
3693      /* Install the new nullification.  */
3694      cond_free(&ctx->null_cond);
3695      if (c) {
3696          ctx->null_cond = do_sed_cond(ctx, c, d, dest);
3697      }
3698      return nullify_end(ctx);
3699  }
3700  
3701  static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
3702  {
3703      if (!ctx->is_pa20 && a->d) {
3704          return false;
3705      }
3706      if (a->c) {
3707          nullify_over(ctx);
3708      }
3709      return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3710                        load_gpr(ctx, a->r));
3711  }
3712  
3713  static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
3714  {
3715      if (!ctx->is_pa20 && a->d) {
3716          return false;
3717      }
3718      if (a->c) {
3719          nullify_over(ctx);
3720      }
3721      return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
3722                        tcg_constant_i64(a->i));
3723  }
3724  
3725  static bool trans_be(DisasContext *ctx, arg_be *a)
3726  {
3727      TCGv_i64 tmp;
3728  
3729  #ifdef CONFIG_USER_ONLY
3730      /* ??? It seems like there should be a good way of using
3731         "be disp(sr2, r0)", the canonical gateway entry mechanism,
3732         to our advantage.  But that appears to be inconvenient to
3733         manage alongside branch delay slots.  Therefore we handle
3734         entry into the gateway page via absolute address.  */
3735      /* Since we don't implement spaces, just branch.  Note the special
3736         case of "be disp(*,r0)" using a direct branch to disp, so that
3737         we can goto_tb to the TB containing the syscall.  */
3738      if (a->b == 0) {
3739          return do_dbranch(ctx, a->disp, a->l, a->n);
3740      }
3741  #else
3742      nullify_over(ctx);
3743  #endif
3744  
3745      tmp = tcg_temp_new_i64();
3746      tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
3747      tmp = do_ibranch_priv(ctx, tmp);
3748  
3749  #ifdef CONFIG_USER_ONLY
3750      return do_ibranch(ctx, tmp, a->l, a->n);
3751  #else
3752      TCGv_i64 new_spc = tcg_temp_new_i64();
3753  
3754      load_spr(ctx, new_spc, a->sp);
3755      if (a->l) {
3756          copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3757          tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3758      }
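          /* If the delay slot is nullified and can be skipped entirely,
             point both words of the queue directly at the branch target.  */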
3759      if (a->n && use_nullify_skip(ctx)) {
3760          copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3761          tcg_gen_addi_i64(tmp, tmp, 4);
3762          copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3763          tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3764          tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3765      } else {
3766          copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3767          if (ctx->iaoq_b == -1) {
3768              tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3769          }
3770          copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
3771          tcg_gen_mov_i64(cpu_iasq_b, new_spc);
3772          nullify_set(ctx, a->n);
3773      }
3774      tcg_gen_lookup_and_goto_ptr();
3775      ctx->base.is_jmp = DISAS_NORETURN;
3776      return nullify_end(ctx);
3777  #endif
3778  }
3779  
3780  static bool trans_bl(DisasContext *ctx, arg_bl *a)
3781  {
3782      return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
3783  }
3784  
3785  static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
3786  {
3787      uint64_t dest = iaoq_dest(ctx, a->disp);
3788  
3789      nullify_over(ctx);
3790  
3791      /* Make sure the caller hasn't done something weird with the queue.
3792       * ??? This is not quite the same as the PSW[B] bit, which would be
3793       * expensive to track.  Real hardware will trap for
3794       *    b  gateway
3795       *    b  gateway+4  (in delay slot of first branch)
3796       * However, checking for a non-sequential instruction queue *will*
3797       * diagnose the security hole
3798       *    b  gateway
3799       *    b  evil
3800       * in which instructions at evil would run with increased privs.
3801       */
3802      if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3803          return gen_illegal(ctx);
3804      }
3805  
3806  #ifndef CONFIG_USER_ONLY
3807      if (ctx->tb_flags & PSW_C) {
3808          CPUHPPAState *env = cpu_env(ctx->cs);
3809          int type = hppa_artype_for_page(env, ctx->base.pc_next);
3810          /* If we could not find a TLB entry, then we need to generate an
3811             ITLB miss exception so the kernel will provide it.
3812             The resulting TLB fill operation will invalidate this TB and
3813             we will re-translate, at which point we *will* be able to find
3814             the TLB entry and determine if this is in fact a gateway page.  */
3815          if (type < 0) {
3816              gen_excp(ctx, EXCP_ITLB_MISS);
3817              return true;
3818          }
3819          /* No change for non-gateway pages or for priv decrease.  */
3820          if (type >= 4 && type - 4 < ctx->privilege) {
3821              dest = deposit64(dest, 0, 2, type - 4);
3822          }
3823      } else {
3824          dest &= -4;  /* priv = 0 */
3825      }
3826  #endif
3827  
3828      if (a->l) {
3829          TCGv_i64 tmp = dest_gpr(ctx, a->l);
3830          if (ctx->privilege < 3) {
3831              tcg_gen_andi_i64(tmp, tmp, -4);
3832          }
3833          tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
3834          save_gpr(ctx, a->l, tmp);
3835      }
3836  
3837      return do_dbranch(ctx, dest, 0, a->n);
3838  }
3839  
3840  static bool trans_blr(DisasContext *ctx, arg_blr *a)
3841  {
3842      if (a->x) {
3843          TCGv_i64 tmp = tcg_temp_new_i64();
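              /* The branch target is iaoq_f + 8 + GR[x] * 8.  */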
3844          tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
3845          tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
3846          /* The computation here never changes privilege level.  */
3847          return do_ibranch(ctx, tmp, a->l, a->n);
3848      } else {
3849          /* BLR R0,RX is a good way to load PC+8 into RX.  */
3850          return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3851      }
3852  }
3853  
3854  static bool trans_bv(DisasContext *ctx, arg_bv *a)
3855  {
3856      TCGv_i64 dest;
3857  
3858      if (a->x == 0) {
3859          dest = load_gpr(ctx, a->b);
3860      } else {
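              /* Indexed form: the target is GR[b] + GR[x] * 8.  */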
3861          dest = tcg_temp_new_i64();
3862          tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
3863          tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
3864      }
3865      dest = do_ibranch_priv(ctx, dest);
3866      return do_ibranch(ctx, dest, 0, a->n);
3867  }
3868  
3869  static bool trans_bve(DisasContext *ctx, arg_bve *a)
3870  {
3871      TCGv_i64 dest;
3872  
3873  #ifdef CONFIG_USER_ONLY
3874      dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3875      return do_ibranch(ctx, dest, a->l, a->n);
3876  #else
3877      nullify_over(ctx);
3878      dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3879  
3880      copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3881      if (ctx->iaoq_b == -1) {
3882          tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3883      }
3884      copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
3885      tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
3886      if (a->l) {
3887          copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
3888      }
3889      nullify_set(ctx, a->n);
3890      tcg_gen_lookup_and_goto_ptr();
3891      ctx->base.is_jmp = DISAS_NORETURN;
3892      return nullify_end(ctx);
3893  #endif
3894  }
3895  
3896  static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
3897  {
3898      /* All branch target stack instructions are implemented as nops. */
3899      return ctx->is_pa20;
3900  }
3901  
3902  /*
3903   * Float class 0
3904   */
3905  
3906  static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3907  {
3908      tcg_gen_mov_i32(dst, src);
3909  }
3910  
3911  static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3912  {
3913      uint64_t ret;
3914  
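          /* FID writes an FPU identification word (model and revision)
             into FR0.  */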
3915      if (ctx->is_pa20) {
3916          ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3917      } else {
3918          ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3919      }
3920  
3921      nullify_over(ctx);
3922      save_frd(0, tcg_constant_i64(ret));
3923      return nullify_end(ctx);
3924  }
3925  
3926  static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
3927  {
3928      return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
3929  }
3930  
3931  static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3932  {
3933      tcg_gen_mov_i64(dst, src);
3934  }
3935  
3936  static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
3937  {
3938      return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
3939  }
3940  
3941  static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3942  {
3943      tcg_gen_andi_i32(dst, src, INT32_MAX);
3944  }
3945  
3946  static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
3947  {
3948      return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
3949  }
3950  
3951  static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3952  {
3953      tcg_gen_andi_i64(dst, src, INT64_MAX);
3954  }
3955  
3956  static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
3957  {
3958      return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
3959  }
3960  
3961  static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
3962  {
3963      return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
3964  }
3965  
3966  static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
3967  {
3968      return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
3969  }
3970  
3971  static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
3972  {
3973      return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
3974  }
3975  
3976  static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
3977  {
3978      return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
3979  }
3980  
3981  static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
3982  {
3983      tcg_gen_xori_i32(dst, src, INT32_MIN);
3984  }
3985  
3986  static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
3987  {
3988      return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
3989  }
3990  
3991  static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3992  {
3993      tcg_gen_xori_i64(dst, src, INT64_MIN);
3994  }
3995  
3996  static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3997  {
3998      return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3999  }
4000  
4001  static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
4002  {
4003      tcg_gen_ori_i32(dst, src, INT32_MIN);
4004  }
4005  
4006  static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
4007  {
4008      return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
4009  }
4010  
4011  static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
4012  {
4013      tcg_gen_ori_i64(dst, src, INT64_MIN);
4014  }
4015  
4016  static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
4017  {
4018      return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
4019  }
4020  
4021  /*
4022   * Float class 1
4023   */
4024  
4025  static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
4026  {
4027      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
4028  }
4029  
4030  static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
4031  {
4032      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
4033  }
4034  
4035  static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
4036  {
4037      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
4038  }
4039  
4040  static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
4041  {
4042      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
4043  }
4044  
4045  static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
4046  {
4047      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
4048  }
4049  
4050  static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
4051  {
4052      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
4053  }
4054  
4055  static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
4056  {
4057      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
4058  }
4059  
4060  static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
4061  {
4062      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
4063  }
4064  
4065  static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
4066  {
4067      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
4068  }
4069  
4070  static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
4071  {
4072      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
4073  }
4074  
4075  static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
4076  {
4077      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
4078  }
4079  
4080  static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
4081  {
4082      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
4083  }
4084  
4085  static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
4086  {
4087      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
4088  }
4089  
4090  static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
4091  {
4092      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
4093  }
4094  
4095  static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
4096  {
4097      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
4098  }
4099  
4100  static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
4101  {
4102      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
4103  }
4104  
4105  static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
4106  {
4107      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
4108  }
4109  
4110  static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
4111  {
4112      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
4113  }
4114  
4115  static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
4116  {
4117      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
4118  }
4119  
4120  static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
4121  {
4122      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
4123  }
4124  
4125  static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
4126  {
4127      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
4128  }
4129  
4130  static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
4131  {
4132      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
4133  }
4134  
4135  static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
4136  {
4137      return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
4138  }
4139  
4140  static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
4141  {
4142      return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
4143  }
4144  
4145  static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
4146  {
4147      return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
4148  }
4149  
4150  static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
4151  {
4152      return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
4153  }
4154  
4155  /*
4156   * Float class 2
4157   */
4158  
4159  static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
4160  {
4161      TCGv_i32 ta, tb, tc, ty;
4162  
4163      nullify_over(ctx);
4164  
4165      ta = load_frw0_i32(a->r1);
4166      tb = load_frw0_i32(a->r2);
4167      ty = tcg_constant_i32(a->y);
4168      tc = tcg_constant_i32(a->c);
4169  
4170      gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
4171  
4172      return nullify_end(ctx);
4173  }
4174  
4175  static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
4176  {
4177      TCGv_i64 ta, tb;
4178      TCGv_i32 tc, ty;
4179  
4180      nullify_over(ctx);
4181  
4182      ta = load_frd0(a->r1);
4183      tb = load_frd0(a->r2);
4184      ty = tcg_constant_i32(a->y);
4185      tc = tcg_constant_i32(a->c);
4186  
4187      gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
4188  
4189      return nullify_end(ctx);
4190  }
4191  
4192  static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
4193  {
4194      TCGv_i64 t;
4195  
4196      nullify_over(ctx);
4197  
4198      t = tcg_temp_new_i64();
4199      tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
4200  
4201      if (a->y == 1) {
4202          int mask;
4203          bool inv = false;
4204  
4205          switch (a->c) {
4206          case 0: /* simple */
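                  /* The "simple" test checks only bit 26, the FPSR C bit
                     in the fr0 shadow copy.  */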
4207              tcg_gen_andi_i64(t, t, 0x4000000);
4208              ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4209              goto done;
4210          case 2: /* rej */
4211              inv = true;
4212              /* fallthru */
4213          case 1: /* acc */
4214              mask = 0x43ff800;
4215              break;
4216          case 6: /* rej8 */
4217              inv = true;
4218              /* fallthru */
4219          case 5: /* acc8 */
4220              mask = 0x43f8000;
4221              break;
4222          case 9: /* acc6 */
4223              mask = 0x43e0000;
4224              break;
4225          case 13: /* acc4 */
4226              mask = 0x4380000;
4227              break;
4228          case 17: /* acc2 */
4229              mask = 0x4200000;
4230              break;
4231          default:
4232              gen_illegal(ctx);
4233              return true;
4234          }
4235          if (inv) {
4236              TCGv_i64 c = tcg_constant_i64(mask);
4237              tcg_gen_or_i64(t, t, c);
4238              ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
4239          } else {
4240              tcg_gen_andi_i64(t, t, mask);
4241              ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
4242          }
4243      } else {
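              /* Otherwise Y selects one bit of the queued compare results
                 (below the C bit) in the fr0 shadow.  */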
4244          unsigned cbit = (a->y ^ 1) - 1;
4245  
4246          tcg_gen_extract_i64(t, t, 21 - cbit, 1);
4247          ctx->null_cond = cond_make_0(TCG_COND_NE, t);
4248      }
4249  
4250   done:
4251      return nullify_end(ctx);
4252  }
4253  
4254  /*
4255   * Float class 3
4256   */
4257  
4258  static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
4259  {
4260      return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
4261  }
4262  
4263  static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
4264  {
4265      return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
4266  }
4267  
4268  static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
4269  {
4270      return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
4271  }
4272  
4273  static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
4274  {
4275      return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
4276  }
4277  
4278  static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
4279  {
4280      return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
4281  }
4282  
4283  static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
4284  {
4285      return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
4286  }
4287  
4288  static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
4289  {
4290      return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
4291  }
4292  
4293  static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
4294  {
4295      return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
4296  }
4297  
4298  static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
4299  {
4300      TCGv_i64 x, y;
4301  
4302      nullify_over(ctx);
4303  
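          /* load_frw0_i64 zero-extends the 32-bit sources, so one 64-bit
             multiply produces the unsigned 32 x 32 -> 64 product.  */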
4304      x = load_frw0_i64(a->r1);
4305      y = load_frw0_i64(a->r2);
4306      tcg_gen_mul_i64(x, x, y);
4307      save_frd(a->t, x);
4308  
4309      return nullify_end(ctx);
4310  }
4311  
4312  /* Convert the fmpyadd single-precision register encodings to standard.  */
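      /* Encodings 0-15 map to registers 16-31; encodings 16-31 map to 48-63. */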
4313  static inline int fmpyadd_s_reg(unsigned r)
4314  {
4315      return (r & 16) * 2 + 16 + (r & 15);
4316  }
4317  
4318  static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4319  {
4320      int tm = fmpyadd_s_reg(a->tm);
4321      int ra = fmpyadd_s_reg(a->ra);
4322      int ta = fmpyadd_s_reg(a->ta);
4323      int rm2 = fmpyadd_s_reg(a->rm2);
4324      int rm1 = fmpyadd_s_reg(a->rm1);
4325  
4326      nullify_over(ctx);
4327  
4328      do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4329      do_fop_weww(ctx, ta, ta, ra,
4330                  is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
4331  
4332      return nullify_end(ctx);
4333  }
4334  
4335  static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4336  {
4337      return do_fmpyadd_s(ctx, a, false);
4338  }
4339  
4340  static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4341  {
4342      return do_fmpyadd_s(ctx, a, true);
4343  }
4344  
4345  static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4346  {
4347      nullify_over(ctx);
4348  
4349      do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4350      do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4351                  is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4352  
4353      return nullify_end(ctx);
4354  }
4355  
4356  static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4357  {
4358      return do_fmpyadd_d(ctx, a, false);
4359  }
4360  
4361  static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4362  {
4363      return do_fmpyadd_d(ctx, a, true);
4364  }
4365  
4366  static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
4367  {
4368      TCGv_i32 x, y, z;
4369  
4370      nullify_over(ctx);
4371      x = load_frw0_i32(a->rm1);
4372      y = load_frw0_i32(a->rm2);
4373      z = load_frw0_i32(a->ra3);
4374  
4375      if (a->neg) {
4376          gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
4377      } else {
4378          gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
4379      }
4380  
4381      save_frw_i32(a->t, x);
4382      return nullify_end(ctx);
4383  }
4384  
4385  static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
4386  {
4387      TCGv_i64 x, y, z;
4388  
4389      nullify_over(ctx);
4390      x = load_frd0(a->rm1);
4391      y = load_frd0(a->rm2);
4392      z = load_frd0(a->ra3);
4393  
4394      if (a->neg) {
4395          gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
4396      } else {
4397          gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
4398      }
4399  
4400      save_frd(a->t, x);
4401      return nullify_end(ctx);
4402  }
4403  
4404  static bool trans_diag(DisasContext *ctx, arg_diag *a)
4405  {
4406      CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4407  #ifndef CONFIG_USER_ONLY
4408      if (a->i == 0x100) {
4409          /* emulate PDC BTLB, called by SeaBIOS-hppa */
4410          nullify_over(ctx);
4411          gen_helper_diag_btlb(tcg_env);
4412          return nullify_end(ctx);
4413      }
4414  #endif
4415      qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4416      return true;
4417  }
4418  
4419  static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
4420  {
4421      DisasContext *ctx = container_of(dcbase, DisasContext, base);
4422      int bound;
4423  
4424      ctx->cs = cs;
4425      ctx->tb_flags = ctx->base.tb->flags;
4426      ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
4427  
4428  #ifdef CONFIG_USER_ONLY
4429      ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
4430      ctx->mmu_idx = MMU_USER_IDX;
4431      ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4432      ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
4433      ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
4434  #else
4435      ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
4436      ctx->mmu_idx = (ctx->tb_flags & PSW_D
4437                      ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4438                      : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
4439  
4440      /* Recover the IAOQ values from the GVA + PRIV.  */
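          /* cs_base packs the front space identifier into the high 32 bits
             and the signed iaoq_b - iaoq_f delta into the low 32 bits
             (zero when the back of the queue is not known).  */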
4441      uint64_t cs_base = ctx->base.tb->cs_base;
4442      uint64_t iasq_f = cs_base & ~0xffffffffull;
4443      int32_t diff = cs_base;
4444  
4445      ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4446      ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4447  #endif
4448      ctx->iaoq_n = -1;
4449      ctx->iaoq_n_var = NULL;
4450  
4451      ctx->zero = tcg_constant_i64(0);
4452  
4453      /* Bound the number of instructions by those left on the page.  */
4454      bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
4455      ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4456  }
4457  
4458  static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4459  {
4460      DisasContext *ctx = container_of(dcbase, DisasContext, base);
4461  
4462      /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
4463      ctx->null_cond = cond_make_f();
4464      ctx->psw_n_nonzero = false;
4465      if (ctx->tb_flags & PSW_N) {
4466          ctx->null_cond.c = TCG_COND_ALWAYS;
4467          ctx->psw_n_nonzero = true;
4468      }
4469      ctx->null_lab = NULL;
4470  }
4471  
4472  static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4473  {
4474      DisasContext *ctx = container_of(dcbase, DisasContext, base);
4475  
4476      tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
4477      ctx->insn_start = tcg_last_op();
4478  }
4479  
4480  static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4481  {
4482      DisasContext *ctx = container_of(dcbase, DisasContext, base);
4483      CPUHPPAState *env = cpu_env(cs);
4484      DisasJumpType ret;
4485  
4486      /* Execute one insn.  */
4487  #ifdef CONFIG_USER_ONLY
4488      if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
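              /* Emulate the kernel gateway page (syscall, LWS, etc.). */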
4489          do_page_zero(ctx);
4490          ret = ctx->base.is_jmp;
4491          assert(ret != DISAS_NEXT);
4492      } else
4493  #endif
4494      {
4495          /* Always fetch the insn, even if nullified, so that we check
4496             the page permissions for execute.  */
4497          uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
4498  
4499          /* Set up the IA queue for the next insn.
4500             This will be overwritten by a branch.  */
4501          if (ctx->iaoq_b == -1) {
4502              ctx->iaoq_n = -1;
4503              ctx->iaoq_n_var = tcg_temp_new_i64();
4504              tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
4505          } else {
4506              ctx->iaoq_n = ctx->iaoq_b + 4;
4507              ctx->iaoq_n_var = NULL;
4508          }
4509  
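              /* This insn was statically nullified: consume the
                 nullification and do not decode it.  */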
4510          if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4511              ctx->null_cond.c = TCG_COND_NEVER;
4512              ret = DISAS_NEXT;
4513          } else {
4514              ctx->insn = insn;
4515              if (!decode(ctx, insn)) {
4516                  gen_illegal(ctx);
4517              }
4518              ret = ctx->base.is_jmp;
4519              assert(ctx->null_lab == NULL);
4520          }
4521      }
4522  
4523      /* Advance the insn queue.  Note that this check also detects
4524         a privilege change within the instruction queue.  */
4525      if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
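              /* A constant null_cond means the next PSW[N] is known now,
                 so we may record it and chain directly to the target TB.  */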
4526          if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4527              && use_goto_tb(ctx, ctx->iaoq_b)
4528              && (ctx->null_cond.c == TCG_COND_NEVER
4529                  || ctx->null_cond.c == TCG_COND_ALWAYS)) {
4530              nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4531              gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
4532              ctx->base.is_jmp = ret = DISAS_NORETURN;
4533          } else {
4534              ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
4535          }
4536      }
4537      ctx->iaoq_f = ctx->iaoq_b;
4538      ctx->iaoq_b = ctx->iaoq_n;
4539      ctx->base.pc_next += 4;
4540  
4541      switch (ret) {
4542      case DISAS_NORETURN:
4543      case DISAS_IAQ_N_UPDATED:
4544          break;
4545  
4546      case DISAS_NEXT:
4547      case DISAS_IAQ_N_STALE:
4548      case DISAS_IAQ_N_STALE_EXIT:
4549          if (ctx->iaoq_f == -1) {
4550              copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
4551              copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
4552  #ifndef CONFIG_USER_ONLY
4553              tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
4554  #endif
4555              nullify_save(ctx);
4556              ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4557                                  ? DISAS_EXIT
4558                                  : DISAS_IAQ_N_UPDATED);
4559          } else if (ctx->iaoq_b == -1) {
4560              copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
4561          }
4562          break;
4563  
4564      default:
4565          g_assert_not_reached();
4566      }
4567  }
4568  
4569  static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4570  {
4571      DisasContext *ctx = container_of(dcbase, DisasContext, base);
4572      DisasJumpType is_jmp = ctx->base.is_jmp;
4573  
4574      switch (is_jmp) {
4575      case DISAS_NORETURN:
4576          break;
4577      case DISAS_TOO_MANY:
4578      case DISAS_IAQ_N_STALE:
4579      case DISAS_IAQ_N_STALE_EXIT:
4580          copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4581          copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
4582          nullify_save(ctx);
4583          /* FALLTHRU */
4584      case DISAS_IAQ_N_UPDATED:
4585          if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
4586              tcg_gen_lookup_and_goto_ptr();
4587              break;
4588          }
4589          /* FALLTHRU */
4590      case DISAS_EXIT:
4591          tcg_gen_exit_tb(NULL, 0);
4592          break;
4593      default:
4594          g_assert_not_reached();
4595      }
4596  }
4597  
4598  static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4599                                CPUState *cs, FILE *logfile)
4600  {
4601      target_ulong pc = dcbase->pc_first;
4602  
4603  #ifdef CONFIG_USER_ONLY
4604      switch (pc) {
4605      case 0x00:
4606          fprintf(logfile, "IN:\n0x00000000:  (null)\n");
4607          return;
4608      case 0xb0:
4609          fprintf(logfile, "IN:\n0x000000b0:  light-weight-syscall\n");
4610          return;
4611      case 0xe0:
4612          fprintf(logfile, "IN:\n0x000000e0:  set-thread-pointer-syscall\n");
4613          return;
4614      case 0x100:
4615          fprintf(logfile, "IN:\n0x00000100:  syscall\n");
4616          return;
4617      }
4618  #endif
4619  
4620      fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4621      target_disas(logfile, cs, pc, dcbase->tb->size);
4622  }
4623  
4624  static const TranslatorOps hppa_tr_ops = {
4625      .init_disas_context = hppa_tr_init_disas_context,
4626      .tb_start           = hppa_tr_tb_start,
4627      .insn_start         = hppa_tr_insn_start,
4628      .translate_insn     = hppa_tr_translate_insn,
4629      .tb_stop            = hppa_tr_tb_stop,
4630      .disas_log          = hppa_tr_disas_log,
4631  };
4632  
4633  void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
4634                             vaddr pc, void *host_pc)
4635  {
4636      DisasContext ctx;
4637      translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
4638  }
4639