/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

/* Choose to use explicit sizes within this file. */
#undef tcg_temp_new

typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;

typedef struct DisasIAQE {
    /* IASQ; may be null for no change from TB. */
    TCGv_i64 space;
    /* IAOQ base; may be null for relative address. */
    TCGv_i64 base;
    /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
    int64_t disp;
} DisasIAQE;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    uint32_t insn;
    bool set_iir;
    int8_t set_n;
    uint8_t excp;
    /* Saved state at parent insn. */
    DisasIAQE iaq_f, iaq_b;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* IAQ_Front, IAQ_Back. */
    DisasIAQE iaq_f, iaq_b;
    /* IAQ_Next, for jumps, otherwise null for simple advance. */
    DisasIAQE iaq_j, *iaq_n;

    /* IAOQ_Front at entry to TB. */
    uint64_t iaoq_first;
    uint64_t gva_offset_mask;

    DisasCond null_cond;
    TCGLabel *null_lab;

    DisasDelayException *delay_excp_list;
    TCGv_i64 zero;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    uint32_t psw_xb;
    bool psw_n_nonzero;
    bool psw_b_next;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
    if (ctx->is_pa20) {
        if (val & PSW_SM_W) {
            val |= PSW_W;
        }
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
    } else {
        val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
    }
    return val;
}

/* An inverted space register value indicates that 0 means sr0,
   not a space inferred from the base register. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}

static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}

/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle things around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    int i = (-(val & 1) << 13) | (im10a << 3);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle things around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    int i = (-(val & 1) << 13) | (im11a << 2);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle things around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}

/* The sp field is only present with !PSW_W. */
static int sp0_if_wide(DisasContext *ctx, int sp)
{
    return ctx->tb_flags & PSW_W ? 0 : sp;
}

/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    return val ? val : 4; /* 0 == "*<<" */
}

/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit. Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    return ctx->is_pa20 & val;
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED     DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE       DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2
#define DISAS_EXIT              DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_i64 cpu_iaoq_f;
static TCGv_i64 cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
static TCGv_i32 cpu_psw_xb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

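/*
 * Record the base register of a memory insn in the insn_start data,
 * so that the unwind code can recover the space implied by the base
 * register if the access faults (see restore_state_to_opc).
 */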
static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}

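/*
 * Constructors for DisasCond. The suffixes encode the operands:
 * _f/_t are constant false/true and _n tests PSW[N]; for comparisons,
 * 't' operands are used as-is (the caller owns the temporaries),
 * 'v' operands are copied into fresh temporaries, and 'i' denotes
 * an immediate second operand.
 */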
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}

static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert(c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}

static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    return cond_make_tt(c, a0, tcg_constant_i64(imm));
}

static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_ti(c, tmp, imm);
}

static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tt(c, t0, t1);
}

static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        return ctx->zero;
    } else {
        return cpu_gr[reg];
    }
}

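/*
 * Return a destination for REG. If the insn may be nullified, write
 * to a temporary instead, so that the architectural register is only
 * updated via save_gpr/save_or_nullify once the condition is known.
 */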
static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new_i64();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

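/*
 * Single-precision FPR n (n < 32) lives in the most-significant half
 * of the double-precision register FR[n], and FPR n+32 in the least-
 * significant half; HI_OFS/LO_OFS are the host-endian byte offsets.
 */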
#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

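/*
 * Load space register REG. SR[0-3] are TCG globals; SR[4-7] live only
 * in env. When TB_FLAG_SR_SAME is set, SR[4-7] all equal srH.
 */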
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/*
 * Write a value to psw_xb, bearing in mind the known value.
 * To be used just before exiting the TB, so do not update the known value.
 */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}

/* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        ctx->null_cond = cond_make_f();
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}

/* Set a PSW[N] to X. The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB. Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over. Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction. Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static bool iaqe_variable(const DisasIAQE *e)
{
    return e->base || e->space;
}

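/* Return a queue entry DISP bytes beyond E. */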
static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
{
    return (DisasIAQE){
        .space = e->space,
        .base = e->base,
        .disp = e->disp + disp,
    };
}

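/*
 * Return a queue entry for a direct branch target: the current
 * instruction plus 8 (i.e. following the delay slot) plus DISP.
 */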
static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .disp = ctx->iaq_f.disp + 8 + disp,
    };
}

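/* Return a queue entry for an indirect branch to the value in VAR. */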
static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .base = var,
    };
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            const DisasIAQE *src)
{
    tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
}

static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}

static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        DisasDelayException *e = delay_excp(ctx, exc);
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn. This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}

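/*
 * Install F (and B) into the IA queue and chain to the next TB,
 * directly via goto_tb when the target is static, otherwise through
 * the indirect tb lookup helper.
 */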
static void gen_goto_tb(DisasContext *ctx, int which,
                        const DisasIAQE *f, const DisasIAQE *b)
{
    install_iaq_entries(ctx, f, b);
    if (use_goto_tb(ctx, f, b)) {
        tcg_gen_goto_tb(which);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}

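/* Conditions 2,3,6 need signed overflow (V); 4,5 need carry/borrow. */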
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V)) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        ((N ^ V) | Z / !((N ^ V) | Z)) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly. This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap. It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res)
{
    TCGCond tc;
    uint64_t imm;

    switch (cf >> 1) {
    case 0: /* never / always */
    case 4: /* undef, C */
    case 5: /* undef, C & !Z */
    case 6: /* undef, V */
        return cf & 1 ? cond_make_t() : cond_make_f();
    case 1: /* == / <> */
        tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
        imm = d ? 0 : UINT32_MAX;
        break;
    case 2: /* < / >= */
        tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
        imm = d ? 0 : 1ull << 31;
        break;
    case 3: /* <= / > */
        tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
        if (!d) {
            TCGv_i64 tmp = tcg_temp_new_i64();
            tcg_gen_ext32s_i64(tmp, res);
            return cond_make_ti(tc, tmp, 0);
        }
        return cond_make_vi(tc, res, 0);
    case 7: /* OD / EV */
        tc = TCG_COND_TSTNE;
        imm = 1;
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    return cond_make_vi(tc, res, imm);
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
                             TCGv_i64 res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(ctx, c * 2 + f, d, res);
}

/* Similar, but for unit zero conditions. */
static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
{
    TCGv_i64 tmp;
    uint64_t d_repl = d ? 0x0000000100000001ull : 1;
    uint64_t ones = 0, sgns = 0;

    switch (cf >> 1) {
    case 1: /* SBW / NBW */
        if (d) {
            ones = d_repl;
            sgns = d_repl << 31;
        }
        break;
    case 2: /* SBZ / NBZ */
        ones = d_repl * 0x01010101u;
        sgns = ones << 7;
        break;
    case 3: /* SHZ / NHZ */
        ones = d_repl * 0x00010001u;
        sgns = ones << 15;
        break;
    }
    if (ones == 0) {
        /* Undefined, or 0/1 (never/always). */
        return cf & 1 ? cond_make_t() : cond_make_f();
    }

    /*
     * See hasless(v,1) from
     * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
     */
    tmp = tcg_temp_new_i64();
    tcg_gen_subi_i64(tmp, res, ones);
    tcg_gen_andc_i64(tmp, tmp, res);

    return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
}

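/*
 * Return the carry bit for the condition: for 32-bit operations that
 * is bit 32 of the carry vector, for 64-bit the saved carry-out msb.
 */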
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    if (!d) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_extract_i64(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}

/* Compute unsigned overflow for addition. */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    if (shift == 0) {
        return get_carry(ctx, d, cb, cb_msb);
    } else {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_extract_i64(tmp, in1, (d ? 63 : 31) - shift, shift);
        tcg_gen_or_i64(tmp, tmp, get_carry(ctx, d, cb, cb_msb));
        return tmp;
    }
}

/* Compute signed overflow for subtraction. */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_and_i64(sv, sv, tmp);

    return sv;
}

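/* Emit the conditional trap for a trap-on-condition instruction. */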
static void gen_tc(DisasContext *ctx, DisasCond *cond)
{
    DisasDelayException *e;

    switch (cond->c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        gen_excp_iir(ctx, EXCP_COND);
        break;
    default:
        e = delay_excp(ctx, EXCP_COND);
        tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, e->lab);
        /* In the non-trap path, the condition is known false. */
        *cond = cond_make_f();
        break;
    }
}

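/* Emit a trap on signed overflow; afterward *SV is known zero. */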
static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    DisasCond cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *e = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(cond.c, cond.a0, cond.a1, e->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        if (is_c) {
            tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), ctx->zero);
        }
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required. */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l,
           is_tsv, is_tc, is_c, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* Unconditional trap on condition. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero,
                         get_psw_carry(ctx, d), ctx->zero);
        tcg_gen_add2_i64(dest, cb_msb, dest, cb_msb, cb, ctx->zero);
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition. We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}

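/* CMPCLR: compare, set the condition, then clear the target register. */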
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 dest, sv;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    tcg_gen_sub_i64(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_i64(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_log_cond(ctx, cf, d, dest);
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
    return nullify_end(ctx);
}

static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register. There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special. To handle this, pass ~SP. */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr ptr;
    TCGv_i64 tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_i64();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new_i64();
    spc = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(tmp, tmp, 030);
    tcg_gen_trunc_i64_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

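/*
 * Form the guest virtual address for a memory operation: return the
 * raw offset in *POFS (for a possible base register update) and the
 * masked space+offset address in *PGVA.
 */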
form_gva(DisasContext * ctx,TCGv_i64 * pgva,TCGv_i64 * pofs,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,bool is_phys)1556 static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
1557 unsigned rb, unsigned rx, int scale, int64_t disp,
1558 unsigned sp, int modify, bool is_phys)
1559 {
1560 TCGv_i64 base = load_gpr(ctx, rb);
1561 TCGv_i64 ofs;
1562 TCGv_i64 addr;
1563
1564 set_insn_breg(ctx, rb);
1565
1566 /* Note that RX is mutually exclusive with DISP. */
1567 if (rx) {
1568 ofs = tcg_temp_new_i64();
1569 tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
1570 tcg_gen_add_i64(ofs, ofs, base);
1571 } else if (disp || modify) {
1572 ofs = tcg_temp_new_i64();
1573 tcg_gen_addi_i64(ofs, base, disp);
1574 } else {
1575 ofs = base;
1576 }
1577
1578 *pofs = ofs;
1579 *pgva = addr = tcg_temp_new_i64();
1580 tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
1581 ctx->gva_offset_mask);
1582 #ifndef CONFIG_USER_ONLY
1583 if (!is_phys) {
1584 tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
1585 }
1586 #endif
1587 }
1588
1589 /* Emit a memory load. The modify parameter should be
1590 * < 0 for pre-modify,
1591 * > 0 for post-modify,
1592 * = 0 for no base register update.
1593 */
do_load_32(DisasContext * ctx,TCGv_i32 dest,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1594 static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
1595 unsigned rx, int scale, int64_t disp,
1596 unsigned sp, int modify, MemOp mop)
1597 {
1598 TCGv_i64 ofs;
1599 TCGv_i64 addr;
1600
1601 /* Caller uses nullify_over/nullify_end. */
1602 assert(ctx->null_cond.c == TCG_COND_NEVER);
1603
1604 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1605 MMU_DISABLED(ctx));
1606 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1607 if (modify) {
1608 save_gpr(ctx, rb, ofs);
1609 }
1610 }
1611
do_load_64(DisasContext * ctx,TCGv_i64 dest,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1612 static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
1613 unsigned rx, int scale, int64_t disp,
1614 unsigned sp, int modify, MemOp mop)
1615 {
1616 TCGv_i64 ofs;
1617 TCGv_i64 addr;
1618
1619 /* Caller uses nullify_over/nullify_end. */
1620 assert(ctx->null_cond.c == TCG_COND_NEVER);
1621
1622 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1623 MMU_DISABLED(ctx));
1624 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1625 if (modify) {
1626 save_gpr(ctx, rb, ofs);
1627 }
1628 }
1629
do_store_32(DisasContext * ctx,TCGv_i32 src,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1630 static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
1631 unsigned rx, int scale, int64_t disp,
1632 unsigned sp, int modify, MemOp mop)
1633 {
1634 TCGv_i64 ofs;
1635 TCGv_i64 addr;
1636
1637 /* Caller uses nullify_over/nullify_end. */
1638 assert(ctx->null_cond.c == TCG_COND_NEVER);
1639
1640 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1641 MMU_DISABLED(ctx));
1642 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1643 if (modify) {
1644 save_gpr(ctx, rb, ofs);
1645 }
1646 }
1647
do_store_64(DisasContext * ctx,TCGv_i64 src,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1648 static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
1649 unsigned rx, int scale, int64_t disp,
1650 unsigned sp, int modify, MemOp mop)
1651 {
1652 TCGv_i64 ofs;
1653 TCGv_i64 addr;
1654
1655 /* Caller uses nullify_over/nullify_end. */
1656 assert(ctx->null_cond.c == TCG_COND_NEVER);
1657
1658 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1659 MMU_DISABLED(ctx));
1660 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
1661 if (modify) {
1662 save_gpr(ctx, rb, ofs);
1663 }
1664 }
1665
do_load(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1666 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1667 unsigned rx, int scale, int64_t disp,
1668 unsigned sp, int modify, MemOp mop)
1669 {
1670 TCGv_i64 dest;
1671
1672 nullify_over(ctx);
1673
1674 if (modify == 0) {
1675 /* No base register update. */
1676 dest = dest_gpr(ctx, rt);
1677 } else {
1678 /* Make sure if RT == RB, we see the result of the load. */
1679 dest = tcg_temp_new_i64();
1680 }
1681 do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1682 save_gpr(ctx, rt, dest);
1683
1684 return nullify_end(ctx);
1685 }
1686
do_floadw(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1687 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1688 unsigned rx, int scale, int64_t disp,
1689 unsigned sp, int modify)
1690 {
1691 TCGv_i32 tmp;
1692
1693 nullify_over(ctx);
1694
1695 tmp = tcg_temp_new_i32();
1696 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1697 save_frw_i32(rt, tmp);
1698
1699 if (rt == 0) {
1700 gen_helper_loaded_fr0(tcg_env);
1701 }
1702
1703 return nullify_end(ctx);
1704 }
1705
trans_fldw(DisasContext * ctx,arg_ldst * a)1706 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1707 {
1708 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1709 a->disp, a->sp, a->m);
1710 }
1711
do_floadd(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1712 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1713 unsigned rx, int scale, int64_t disp,
1714 unsigned sp, int modify)
1715 {
1716 TCGv_i64 tmp;
1717
1718 nullify_over(ctx);
1719
1720 tmp = tcg_temp_new_i64();
1721 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1722 save_frd(rt, tmp);
1723
1724 if (rt == 0) {
1725 gen_helper_loaded_fr0(tcg_env);
1726 }
1727
1728 return nullify_end(ctx);
1729 }
1730
trans_fldd(DisasContext * ctx,arg_ldst * a)1731 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1732 {
1733 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1734 a->disp, a->sp, a->m);
1735 }
1736
do_store(DisasContext * ctx,unsigned rt,unsigned rb,int64_t disp,unsigned sp,int modify,MemOp mop)1737 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1738 int64_t disp, unsigned sp,
1739 int modify, MemOp mop)
1740 {
1741 nullify_over(ctx);
1742 do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1743 return nullify_end(ctx);
1744 }
1745
do_fstorew(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1746 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1747 unsigned rx, int scale, int64_t disp,
1748 unsigned sp, int modify)
1749 {
1750 TCGv_i32 tmp;
1751
1752 nullify_over(ctx);
1753
1754 tmp = load_frw_i32(rt);
1755 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1756
1757 return nullify_end(ctx);
1758 }
1759
trans_fstw(DisasContext * ctx,arg_ldst * a)1760 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1761 {
1762 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1763 a->disp, a->sp, a->m);
1764 }
1765
do_fstored(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1766 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1767 unsigned rx, int scale, int64_t disp,
1768 unsigned sp, int modify)
1769 {
1770 TCGv_i64 tmp;
1771
1772 nullify_over(ctx);
1773
1774 tmp = load_frd(rt);
1775 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1776
1777 return nullify_end(ctx);
1778 }
1779
trans_fstd(DisasContext * ctx,arg_ldst * a)1780 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1781 {
1782 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1783 a->disp, a->sp, a->m);
1784 }
1785
do_fop_wew(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i32,TCGv_env,TCGv_i32))1786 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1787 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1788 {
1789 TCGv_i32 tmp;
1790
1791 nullify_over(ctx);
1792 tmp = load_frw0_i32(ra);
1793
1794 func(tmp, tcg_env, tmp);
1795
1796 save_frw_i32(rt, tmp);
1797 return nullify_end(ctx);
1798 }
1799
do_fop_wed(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i32,TCGv_env,TCGv_i64))1800 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1801 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1802 {
1803 TCGv_i32 dst;
1804 TCGv_i64 src;
1805
1806 nullify_over(ctx);
1807 src = load_frd(ra);
1808 dst = tcg_temp_new_i32();
1809
1810 func(dst, tcg_env, src);
1811
1812 save_frw_i32(rt, dst);
1813 return nullify_end(ctx);
1814 }
1815
do_fop_ded(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i64,TCGv_env,TCGv_i64))1816 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1817 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1818 {
1819 TCGv_i64 tmp;
1820
1821 nullify_over(ctx);
1822 tmp = load_frd0(ra);
1823
1824 func(tmp, tcg_env, tmp);
1825
1826 save_frd(rt, tmp);
1827 return nullify_end(ctx);
1828 }
1829
do_fop_dew(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i64,TCGv_env,TCGv_i32))1830 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1831 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1832 {
1833 TCGv_i32 src;
1834 TCGv_i64 dst;
1835
1836 nullify_over(ctx);
1837 src = load_frw0_i32(ra);
1838 dst = tcg_temp_new_i64();
1839
1840 func(dst, tcg_env, src);
1841
1842 save_frd(rt, dst);
1843 return nullify_end(ctx);
1844 }
1845
do_fop_weww(DisasContext * ctx,unsigned rt,unsigned ra,unsigned rb,void (* func)(TCGv_i32,TCGv_env,TCGv_i32,TCGv_i32))1846 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1847 unsigned ra, unsigned rb,
1848 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1849 {
1850 TCGv_i32 a, b;
1851
1852 nullify_over(ctx);
1853 a = load_frw0_i32(ra);
1854 b = load_frw0_i32(rb);
1855
1856 func(a, tcg_env, a, b);
1857
1858 save_frw_i32(rt, a);
1859 return nullify_end(ctx);
1860 }
1861
do_fop_dedd(DisasContext * ctx,unsigned rt,unsigned ra,unsigned rb,void (* func)(TCGv_i64,TCGv_env,TCGv_i64,TCGv_i64))1862 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1863 unsigned ra, unsigned rb,
1864 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1865 {
1866 TCGv_i64 a, b;
1867
1868 nullify_over(ctx);
1869 a = load_frd0(ra);
1870 b = load_frd0(rb);
1871
1872 func(a, tcg_env, a, b);
1873
1874 save_frd(rt, a);
1875 return nullify_end(ctx);
1876 }
1877
1878 /* Emit an unconditional branch to a direct target, which may or may not
1879 have already had nullification handled. */
1880 static bool do_dbranch(DisasContext *ctx, int64_t disp,
1881 unsigned link, bool is_n)
1882 {
1883 ctx->iaq_j = iaqe_branchi(ctx, disp);
1884
1885 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1886 install_link(ctx, link, false);
1887 if (is_n) {
1888 if (use_nullify_skip(ctx)) {
1889 nullify_set(ctx, 0);
1890 store_psw_xb(ctx, 0);
1891 gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1892 ctx->base.is_jmp = DISAS_NORETURN;
1893 return true;
1894 }
1895 ctx->null_cond.c = TCG_COND_ALWAYS;
1896 }
1897 ctx->iaq_n = &ctx->iaq_j;
1898 ctx->psw_b_next = true;
1899 } else {
1900 nullify_over(ctx);
1901
1902 install_link(ctx, link, false);
1903 if (is_n && use_nullify_skip(ctx)) {
1904 nullify_set(ctx, 0);
1905 store_psw_xb(ctx, 0);
1906 gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
1907 } else {
1908 nullify_set(ctx, is_n);
1909 store_psw_xb(ctx, PSW_B);
1910 gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
1911 }
1912 nullify_end(ctx);
1913
1914 nullify_set(ctx, 0);
1915 store_psw_xb(ctx, 0);
1916 gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
1917 ctx->base.is_jmp = DISAS_NORETURN;
1918 }
1919 return true;
1920 }
1921
1922 /* Emit a conditional branch to a direct target. If the branch itself
1923 is nullified, we should have already used nullify_over. */
1924 static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
1925 DisasCond *cond)
1926 {
1927 DisasIAQE next;
1928 TCGLabel *taken = NULL;
1929 TCGCond c = cond->c;
1930 bool n;
1931
1932 assert(ctx->null_cond.c == TCG_COND_NEVER);
1933
1934 /* Handle TRUE as a direct branch. */
1935 if (c == TCG_COND_ALWAYS) {
1936 return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
1937 }
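/*
 * PA-RISC nullification for conditional branches depends on the
 * branch direction: a forward branch nullifies the following insn
 * when taken, a backward branch when not taken, so that a loop's
 * delay slot still executes on every iteration but the last.
 */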
1938
1939 taken = gen_new_label();
1940 tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);
1941
1942 /* Not taken: Condition not satisfied; nullify on backward branches. */
1943 n = is_n && disp < 0;
1944 if (n && use_nullify_skip(ctx)) {
1945 nullify_set(ctx, 0);
1946 store_psw_xb(ctx, 0);
1947 next = iaqe_incr(&ctx->iaq_b, 4);
1948 gen_goto_tb(ctx, 0, &next, NULL);
1949 } else {
1950 if (!n && ctx->null_lab) {
1951 gen_set_label(ctx->null_lab);
1952 ctx->null_lab = NULL;
1953 }
1954 nullify_set(ctx, n);
1955 store_psw_xb(ctx, 0);
1956 gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
1957 }
1958
1959 gen_set_label(taken);
1960
1961 /* Taken: Condition satisfied; nullify on forward branches. */
1962 n = is_n && disp >= 0;
1963
1964 next = iaqe_branchi(ctx, disp);
1965 if (n && use_nullify_skip(ctx)) {
1966 nullify_set(ctx, 0);
1967 store_psw_xb(ctx, 0);
1968 gen_goto_tb(ctx, 1, &next, NULL);
1969 } else {
1970 nullify_set(ctx, n);
1971 store_psw_xb(ctx, PSW_B);
1972 gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
1973 }
1974
1975 /* Not taken: the branch itself was nullified. */
1976 if (ctx->null_lab) {
1977 gen_set_label(ctx->null_lab);
1978 ctx->null_lab = NULL;
1979 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
1980 } else {
1981 ctx->base.is_jmp = DISAS_NORETURN;
1982 }
1983 return true;
1984 }
1985
1986 /*
1987 * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
1988 * This handles nullification of the branch itself.
1989 */
1990 static bool do_ibranch(DisasContext *ctx, unsigned link,
1991 bool with_sr0, bool is_n)
1992 {
1993 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1994 install_link(ctx, link, with_sr0);
1995 if (is_n) {
1996 if (use_nullify_skip(ctx)) {
1997 install_iaq_entries(ctx, &ctx->iaq_j, NULL);
1998 nullify_set(ctx, 0);
1999 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2000 return true;
2001 }
2002 ctx->null_cond.c = TCG_COND_ALWAYS;
2003 }
2004 ctx->iaq_n = &ctx->iaq_j;
2005 ctx->psw_b_next = true;
2006 return true;
2007 }
2008
2009 nullify_over(ctx);
2010
2011 install_link(ctx, link, with_sr0);
2012 if (is_n && use_nullify_skip(ctx)) {
2013 install_iaq_entries(ctx, &ctx->iaq_j, NULL);
2014 nullify_set(ctx, 0);
2015 store_psw_xb(ctx, 0);
2016 } else {
2017 install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
2018 nullify_set(ctx, is_n);
2019 store_psw_xb(ctx, PSW_B);
2020 }
2021
2022 tcg_gen_lookup_and_goto_ptr();
2023 ctx->base.is_jmp = DISAS_NORETURN;
2024 return nullify_end(ctx);
2025 }
2026
2027 /* Implement
2028 * if (IAOQ_Front{30..31} < GR[b]{30..31})
2029 * IAOQ_Next{30..31} ← GR[b]{30..31};
2030 * else
2031 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
2032 * which keeps the privilege level from being increased.
2033 */
2034 static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
2035 {
2036 TCGv_i64 dest = tcg_temp_new_i64();
2037 switch (ctx->privilege) {
2038 case 0:
2039 /* Privilege 0 is maximum and is allowed to decrease. */
2040 tcg_gen_mov_i64(dest, offset);
2041 break;
2042 case 3:
2043 /* Privilege 3 is minimum and is never allowed to increase. */
2044 tcg_gen_ori_i64(dest, offset, 3);
2045 break;
2046 default:
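/*
 * Intermediate privilege: force the privilege field of the target
 * to the current level, then take the unsigned maximum with the
 * original offset. E.g. at privilege 2, a target ending in 1 gives
 * umax(base | 2, base | 1) = base | 2 (no increase), while a
 * target ending in 3 gives base | 3 (a legal decrease).
 */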
2047 tcg_gen_andi_i64(dest, offset, -4);
2048 tcg_gen_ori_i64(dest, dest, ctx->privilege);
2049 tcg_gen_umax_i64(dest, dest, offset);
2050 break;
2051 }
2052 return dest;
2053 }
2054
2055 #ifdef CONFIG_USER_ONLY
2056 /* On Linux, page zero is normally marked execute only + gateway.
2057 Therefore normal read or write is supposed to fail, but specific
2058 offsets have kernel code mapped to raise permissions to implement
2059 system calls. Handling this via an explicit check here, rather
2060 than in the "be disp(sr2,r0)" instruction that probably sent us
2061 here, is the easiest way to handle the branch delay slot on the
2062 aforementioned BE. */
2063 static void do_page_zero(DisasContext *ctx)
2064 {
2065 assert(ctx->iaq_f.disp == 0);
2066
2067 /* If by some means we get here with PSW[N]=1, that implies that
2068 the B,GATE instruction would be skipped, and we'd fault on the
2069 next insn within the privileged page. */
2070 switch (ctx->null_cond.c) {
2071 case TCG_COND_NEVER:
2072 break;
2073 case TCG_COND_ALWAYS:
2074 tcg_gen_movi_i64(cpu_psw_n, 0);
2075 goto do_sigill;
2076 default:
2077 /* Since this is always the first (and only) insn within the
2078 TB, we should know the state of PSW[N] from TB->FLAGS. */
2079 g_assert_not_reached();
2080 }
2081
2082 /* If PSW[B] is set, the B,GATE insn would trap. */
2083 if (ctx->psw_xb & PSW_B) {
2084 goto do_sigill;
2085 }
2086
2087 switch (ctx->base.pc_first) {
2088 case 0x00: /* Null pointer call */
2089 gen_excp_1(EXCP_IMP);
2090 ctx->base.is_jmp = DISAS_NORETURN;
2091 break;
2092
2093 case 0xb0: /* LWS */
2094 gen_excp_1(EXCP_SYSCALL_LWS);
2095 ctx->base.is_jmp = DISAS_NORETURN;
2096 break;
2097
2098 case 0xe0: /* SET_THREAD_POINTER */
2099 {
2100 DisasIAQE next = { .base = tcg_temp_new_i64() };
2101
2102 tcg_gen_st_i64(cpu_gr[26], tcg_env,
2103 offsetof(CPUHPPAState, cr[27]));
2104 tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
2105 install_iaq_entries(ctx, &next, NULL);
2106 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2107 }
2108 break;
2109
2110 case 0x100: /* SYSCALL */
2111 gen_excp_1(EXCP_SYSCALL);
2112 ctx->base.is_jmp = DISAS_NORETURN;
2113 break;
2114
2115 default:
2116 do_sigill:
2117 gen_excp_1(EXCP_ILL);
2118 ctx->base.is_jmp = DISAS_NORETURN;
2119 break;
2120 }
2121 }
2122 #endif
2123
2124 static bool trans_nop(DisasContext *ctx, arg_nop *a)
2125 {
2126 ctx->null_cond = cond_make_f();
2127 return true;
2128 }
2129
2130 static bool trans_break(DisasContext *ctx, arg_break *a)
2131 {
2132 return gen_excp_iir(ctx, EXCP_BREAK);
2133 }
2134
2135 static bool trans_sync(DisasContext *ctx, arg_sync *a)
2136 {
2137 /* No point in nullifying the memory barrier. */
2138 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2139
2140 ctx->null_cond = cond_make_f();
2141 return true;
2142 }
2143
2144 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2145 {
2146 TCGv_i64 dest = dest_gpr(ctx, a->t);
2147
2148 copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
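/* The low two bits of IAOQ hold the privilege level; mask them out. */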
2149 tcg_gen_andi_i64(dest, dest, -4);
2150
2151 save_gpr(ctx, a->t, dest);
2152 ctx->null_cond = cond_make_f();
2153 return true;
2154 }
2155
2156 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2157 {
2158 unsigned rt = a->t;
2159 unsigned rs = a->sp;
2160 TCGv_i64 t0 = tcg_temp_new_i64();
2161
2162 load_spr(ctx, t0, rs);
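/* Space register values live in the high 32 bits; shift down for MFSP. */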
2163 tcg_gen_shri_i64(t0, t0, 32);
2164
2165 save_gpr(ctx, rt, t0);
2166
2167 ctx->null_cond = cond_make_f();
2168 return true;
2169 }
2170
2171 static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
2172 {
2173 unsigned rt = a->t;
2174 unsigned ctl = a->r;
2175 TCGv_i64 tmp;
2176
2177 switch (ctl) {
2178 case CR_SAR:
2179 if (a->e == 0) {
2180 /* MFSAR without ,W masks low 5 bits. */
2181 tmp = dest_gpr(ctx, rt);
2182 tcg_gen_andi_i64(tmp, cpu_sar, 31);
2183 save_gpr(ctx, rt, tmp);
2184 goto done;
2185 }
2186 save_gpr(ctx, rt, cpu_sar);
2187 goto done;
2188 case CR_IT: /* Interval Timer */
2189 /* FIXME: Respect PSW_S bit. */
2190 nullify_over(ctx);
2191 tmp = dest_gpr(ctx, rt);
2192 if (translator_io_start(&ctx->base)) {
2193 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2194 }
2195 gen_helper_read_interval_timer(tmp);
2196 save_gpr(ctx, rt, tmp);
2197 return nullify_end(ctx);
2198 case 26:
2199 case 27:
2200 break;
2201 default:
2202 /* All other control registers are privileged. */
2203 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2204 break;
2205 }
2206
2207 tmp = tcg_temp_new_i64();
2208 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2209 save_gpr(ctx, rt, tmp);
2210
2211 done:
2212 ctx->null_cond = cond_make_f();
2213 return true;
2214 }
2215
2216 static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
2217 {
2218 unsigned rr = a->r;
2219 unsigned rs = a->sp;
2220 TCGv_i64 tmp;
2221
2222 if (rs >= 5) {
2223 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2224 }
2225 nullify_over(ctx);
2226
2227 tmp = tcg_temp_new_i64();
2228 tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);
2229
2230 if (rs >= 4) {
2231 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
2232 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
2233 } else {
2234 tcg_gen_mov_i64(cpu_sr[rs], tmp);
2235 }
2236
2237 return nullify_end(ctx);
2238 }
2239
2240 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2241 {
2242 unsigned ctl = a->t;
2243 TCGv_i64 reg;
2244 TCGv_i64 tmp;
2245
2246 if (ctl == CR_SAR) {
2247 reg = load_gpr(ctx, a->r);
2248 tmp = tcg_temp_new_i64();
2249 tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2250 save_or_nullify(ctx, cpu_sar, tmp);
2251
2252 ctx->null_cond = cond_make_f();
2253 return true;
2254 }
2255
2256 /* All other control registers are privileged or read-only. */
2257 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2258
2259 #ifndef CONFIG_USER_ONLY
2260 nullify_over(ctx);
2261
2262 if (ctx->is_pa20) {
2263 reg = load_gpr(ctx, a->r);
2264 } else {
2265 reg = tcg_temp_new_i64();
2266 tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2267 }
2268
2269 switch (ctl) {
2270 case CR_IT:
2271 if (translator_io_start(&ctx->base)) {
2272 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2273 }
2274 gen_helper_write_interval_timer(tcg_env, reg);
2275 break;
2276 case CR_EIRR:
2277 /* Helper modifies interrupt lines and is therefore IO. */
2278 translator_io_start(&ctx->base);
2279 gen_helper_write_eirr(tcg_env, reg);
2280 /* Exit to re-evaluate interrupts in the main loop. */
2281 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2282 break;
2283
2284 case CR_IIASQ:
2285 case CR_IIAOQ:
2286 /* FIXME: Respect PSW_Q bit */
2287 /* The write advances the queue and stores to the back element. */
2288 tmp = tcg_temp_new_i64();
2289 tcg_gen_ld_i64(tmp, tcg_env,
2290 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2291 tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2292 tcg_gen_st_i64(reg, tcg_env,
2293 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2294 break;
2295
2296 case CR_PID1:
2297 case CR_PID2:
2298 case CR_PID3:
2299 case CR_PID4:
2300 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2301 #ifndef CONFIG_USER_ONLY
2302 gen_helper_change_prot_id(tcg_env);
2303 #endif
2304 break;
2305
2306 case CR_EIEM:
2307 /* Exit to re-evaluate interrupts in the main loop. */
2308 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2309 /* FALLTHRU */
2310 default:
2311 tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2312 break;
2313 }
2314 return nullify_end(ctx);
2315 #endif
2316 }
2317
2318 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2319 {
2320 TCGv_i64 tmp = tcg_temp_new_i64();
2321
2322 tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2323 tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2324 save_or_nullify(ctx, cpu_sar, tmp);
2325
2326 ctx->null_cond = cond_make_f();
2327 return true;
2328 }
2329
2330 static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
2331 {
2332 TCGv_i64 dest = dest_gpr(ctx, a->t);
2333
2334 #ifdef CONFIG_USER_ONLY
2335 /* We don't implement space registers in user mode. */
2336 tcg_gen_movi_i64(dest, 0);
2337 #else
2338 tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2339 tcg_gen_shri_i64(dest, dest, 32);
2340 #endif
2341 save_gpr(ctx, a->t, dest);
2342
2343 ctx->null_cond = cond_make_f();
2344 return true;
2345 }
2346
2347 static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
2348 {
2349 #ifdef CONFIG_USER_ONLY
2350 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2351 #else
2352 TCGv_i64 tmp;
2353
2354 /* HP-UX 11i and HP ODE use RSM for read access to the PSW. */
2355 if (a->i) {
2356 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2357 }
2358
2359 nullify_over(ctx);
2360
2361 tmp = tcg_temp_new_i64();
2362 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2363 tcg_gen_andi_i64(tmp, tmp, ~a->i);
2364 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2365 save_gpr(ctx, a->t, tmp);
2366
2367 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2368 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2369 return nullify_end(ctx);
2370 #endif
2371 }
2372
2373 static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
2374 {
2375 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2376 #ifndef CONFIG_USER_ONLY
2377 TCGv_i64 tmp;
2378
2379 nullify_over(ctx);
2380
2381 tmp = tcg_temp_new_i64();
2382 tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
2383 tcg_gen_ori_i64(tmp, tmp, a->i);
2384 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
2385 save_gpr(ctx, a->t, tmp);
2386
2387 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2388 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2389 return nullify_end(ctx);
2390 #endif
2391 }
2392
2393 static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
2394 {
2395 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2396 #ifndef CONFIG_USER_ONLY
2397 TCGv_i64 tmp, reg;
2398 nullify_over(ctx);
2399
2400 reg = load_gpr(ctx, a->r);
2401 tmp = tcg_temp_new_i64();
2402 gen_helper_swap_system_mask(tmp, tcg_env, reg);
2403
2404 /* Exit the TB to recognize new interrupts. */
2405 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2406 return nullify_end(ctx);
2407 #endif
2408 }
2409
2410 static bool do_rfi(DisasContext *ctx, bool rfi_r)
2411 {
2412 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2413 #ifndef CONFIG_USER_ONLY
2414 nullify_over(ctx);
2415
2416 if (rfi_r) {
2417 gen_helper_rfi_r(tcg_env);
2418 } else {
2419 gen_helper_rfi(tcg_env);
2420 }
2421 /* Exit the TB to recognize new interrupts. */
2422 tcg_gen_exit_tb(NULL, 0);
2423 ctx->base.is_jmp = DISAS_NORETURN;
2424
2425 return nullify_end(ctx);
2426 #endif
2427 }
2428
2429 static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2430 {
2431 return do_rfi(ctx, false);
2432 }
2433
2434 static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2435 {
2436 return do_rfi(ctx, true);
2437 }
2438
2439 static bool trans_halt(DisasContext *ctx, arg_halt *a)
2440 {
2441 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2442 #ifndef CONFIG_USER_ONLY
2443 set_psw_xb(ctx, 0);
2444 nullify_over(ctx);
2445 gen_helper_halt(tcg_env);
2446 ctx->base.is_jmp = DISAS_NORETURN;
2447 return nullify_end(ctx);
2448 #endif
2449 }
2450
2451 static bool trans_reset(DisasContext *ctx, arg_reset *a)
2452 {
2453 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2454 #ifndef CONFIG_USER_ONLY
2455 set_psw_xb(ctx, 0);
2456 nullify_over(ctx);
2457 gen_helper_reset(tcg_env);
2458 ctx->base.is_jmp = DISAS_NORETURN;
2459 return nullify_end(ctx);
2460 #endif
2461 }
2462
2463 static bool do_getshadowregs(DisasContext *ctx)
2464 {
2465 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2466 nullify_over(ctx);
2467 tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2468 tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2469 tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2470 tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2471 tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2472 tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2473 tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2474 return nullify_end(ctx);
2475 }
2476
2477 static bool do_putshadowregs(DisasContext *ctx)
2478 {
2479 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2480 nullify_over(ctx);
2481 tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2482 tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2483 tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2484 tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2485 tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2486 tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2487 tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2488 return nullify_end(ctx);
2489 }
2490
2491 static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2492 {
2493 return do_getshadowregs(ctx);
2494 }
2495
2496 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2497 {
2498 if (a->m) {
2499 TCGv_i64 dest = dest_gpr(ctx, a->b);
2500 TCGv_i64 src1 = load_gpr(ctx, a->b);
2501 TCGv_i64 src2 = load_gpr(ctx, a->x);
2502
2503 /* The only thing we need to do is the base register modification. */
2504 tcg_gen_add_i64(dest, src1, src2);
2505 save_gpr(ctx, a->b, dest);
2506 }
2507 ctx->null_cond = cond_make_f();
2508 return true;
2509 }
2510
2511 static bool trans_fic(DisasContext *ctx, arg_ldst *a)
2512 {
2513 /* End TB for flush instruction cache, so we pick up new insns. */
2514 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2515 return trans_nop_addrx(ctx, a);
2516 }
2517
2518 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2519 {
2520 TCGv_i64 dest, ofs;
2521 TCGv_i32 level, want;
2522 TCGv_i64 addr;
2523
2524 nullify_over(ctx);
2525
2526 dest = dest_gpr(ctx, a->t);
2527 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2528
2529 if (a->imm) {
2530 level = tcg_constant_i32(a->ri & 3);
2531 } else {
2532 level = tcg_temp_new_i32();
2533 tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2534 tcg_gen_andi_i32(level, level, 3);
2535 }
2536 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2537
2538 gen_helper_probe(dest, tcg_env, addr, level, want);
2539
2540 save_gpr(ctx, a->t, dest);
2541 return nullify_end(ctx);
2542 }
2543
2544 static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
2545 {
2546 if (ctx->is_pa20) {
2547 return false;
2548 }
2549 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2550 #ifndef CONFIG_USER_ONLY
2551 TCGv_i64 addr;
2552 TCGv_i64 ofs, reg;
2553
2554 nullify_over(ctx);
2555
2556 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2557 reg = load_gpr(ctx, a->r);
2558 if (a->addr) {
2559 gen_helper_itlba_pa11(tcg_env, addr, reg);
2560 } else {
2561 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2562 }
2563
2564 /* Exit TB for TLB change if mmu is enabled. */
2565 if (ctx->tb_flags & PSW_C) {
2566 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2567 }
2568 return nullify_end(ctx);
2569 #endif
2570 }
2571
2572 static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
2573 {
2574 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2575 #ifndef CONFIG_USER_ONLY
2576 TCGv_i64 addr;
2577 TCGv_i64 ofs;
2578
2579 nullify_over(ctx);
2580
2581 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2582
2583 /*
2584 * Page align now, rather than later, so that we can add in the
2585 * pa2.0 page_size field from the low 4 bits of GR[b].
2586 */
2587 tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
2588 if (ctx->is_pa20) {
2589 tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
2590 }
2591
2592 if (local) {
2593 gen_helper_ptlb_l(tcg_env, addr);
2594 } else {
2595 gen_helper_ptlb(tcg_env, addr);
2596 }
2597
2598 if (a->m) {
2599 save_gpr(ctx, a->b, ofs);
2600 }
2601
2602 /* Exit TB for TLB change if mmu is enabled. */
2603 if (ctx->tb_flags & PSW_C) {
2604 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2605 }
2606 return nullify_end(ctx);
2607 #endif
2608 }
2609
2610 static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
2611 {
2612 return do_pxtlb(ctx, a, false);
2613 }
2614
2615 static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
2616 {
2617 return ctx->is_pa20 && do_pxtlb(ctx, a, true);
2618 }
2619
2620 static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
2621 {
2622 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2623 #ifndef CONFIG_USER_ONLY
2624 nullify_over(ctx);
2625
2626 trans_nop_addrx(ctx, a);
2627 gen_helper_ptlbe(tcg_env);
2628
2629 /* Exit TB for TLB change if mmu is enabled. */
2630 if (ctx->tb_flags & PSW_C) {
2631 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2632 }
2633 return nullify_end(ctx);
2634 #endif
2635 }
2636
2637 /*
2638 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2639 * See
2640 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2641 * page 13-9 (195/206)
2642 */
2643 static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2644 {
2645 if (ctx->is_pa20) {
2646 return false;
2647 }
2648 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2649 #ifndef CONFIG_USER_ONLY
2650 TCGv_i64 addr, atl, stl;
2651 TCGv_i64 reg;
2652
2653 nullify_over(ctx);
2654
2655 /*
2656 * FIXME:
2657 * if (not (pcxl or pcxl2))
2658 * return gen_illegal(ctx);
2659 */
2660
2661 atl = tcg_temp_new_i64();
2662 stl = tcg_temp_new_i64();
2663 addr = tcg_temp_new_i64();
2664
2665 tcg_gen_ld32u_i64(stl, tcg_env,
2666 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2667 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
2668 tcg_gen_ld32u_i64(atl, tcg_env,
2669 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2670 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2671 tcg_gen_shli_i64(stl, stl, 32);
2672 tcg_gen_or_i64(addr, atl, stl);
2673
2674 reg = load_gpr(ctx, a->r);
2675 if (a->addr) {
2676 gen_helper_itlba_pa11(tcg_env, addr, reg);
2677 } else {
2678 gen_helper_itlbp_pa11(tcg_env, addr, reg);
2679 }
2680
2681 /* Exit TB for TLB change if mmu is enabled. */
2682 if (ctx->tb_flags & PSW_C) {
2683 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2684 }
2685 return nullify_end(ctx);
2686 #endif
2687 }
2688
2689 static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
2690 {
2691 if (!ctx->is_pa20) {
2692 return false;
2693 }
2694 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2695 #ifndef CONFIG_USER_ONLY
2696 nullify_over(ctx);
2697 {
2698 TCGv_i64 src1 = load_gpr(ctx, a->r1);
2699 TCGv_i64 src2 = load_gpr(ctx, a->r2);
2700
2701 if (a->data) {
2702 gen_helper_idtlbt_pa20(tcg_env, src1, src2);
2703 } else {
2704 gen_helper_iitlbt_pa20(tcg_env, src1, src2);
2705 }
2706 }
2707 /* Exit TB for TLB change if mmu is enabled. */
2708 if (ctx->tb_flags & PSW_C) {
2709 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2710 }
2711 return nullify_end(ctx);
2712 #endif
2713 }
2714
2715 static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2716 {
2717 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2718 #ifndef CONFIG_USER_ONLY
2719 TCGv_i64 vaddr;
2720 TCGv_i64 ofs, paddr;
2721
2722 nullify_over(ctx);
2723
2724 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2725
2726 paddr = tcg_temp_new_i64();
2727 gen_helper_lpa(paddr, tcg_env, vaddr);
2728
2729 /* Note that the physical address result overrides the base modification. */
2730 if (a->m) {
2731 save_gpr(ctx, a->b, ofs);
2732 }
2733 save_gpr(ctx, a->t, paddr);
2734
2735 return nullify_end(ctx);
2736 #endif
2737 }
2738
2739 static bool trans_lci(DisasContext *ctx, arg_lci *a)
2740 {
2741 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2742
2743 /* The Coherence Index is an implementation-defined function of the
2744 physical address. Two addresses with the same CI have a coherent
2745 view of the cache. Our implementation is to return 0 for all,
2746 since the entire address space is coherent. */
2747 save_gpr(ctx, a->t, ctx->zero);
2748
2749 ctx->null_cond = cond_make_f();
2750 return true;
2751 }
2752
2753 static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2754 {
2755 return do_add_reg(ctx, a, false, false, false, false);
2756 }
2757
2758 static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2759 {
2760 return do_add_reg(ctx, a, true, false, false, false);
2761 }
2762
2763 static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2764 {
2765 return do_add_reg(ctx, a, false, true, false, false);
2766 }
2767
2768 static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2769 {
2770 return do_add_reg(ctx, a, false, false, false, true);
2771 }
2772
2773 static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
2774 {
2775 return do_add_reg(ctx, a, false, true, false, true);
2776 }
2777
2778 static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
2779 {
2780 return do_sub_reg(ctx, a, false, false, false);
2781 }
2782
2783 static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2784 {
2785 return do_sub_reg(ctx, a, true, false, false);
2786 }
2787
2788 static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2789 {
2790 return do_sub_reg(ctx, a, false, false, true);
2791 }
2792
2793 static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2794 {
2795 return do_sub_reg(ctx, a, true, false, true);
2796 }
2797
2798 static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
2799 {
2800 return do_sub_reg(ctx, a, false, true, false);
2801 }
2802
2803 static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
2804 {
2805 return do_sub_reg(ctx, a, true, true, false);
2806 }
2807
2808 static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
2809 {
2810 return do_log_reg(ctx, a, tcg_gen_andc_i64);
2811 }
2812
2813 static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
2814 {
2815 return do_log_reg(ctx, a, tcg_gen_and_i64);
2816 }
2817
2818 static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
2819 {
2820 if (a->cf == 0) {
2821 unsigned r2 = a->r2;
2822 unsigned r1 = a->r1;
2823 unsigned rt = a->t;
2824
2825 if (rt == 0) { /* NOP */
2826 ctx->null_cond = cond_make_f();
2827 return true;
2828 }
2829 if (r2 == 0) { /* COPY */
2830 if (r1 == 0) {
2831 TCGv_i64 dest = dest_gpr(ctx, rt);
2832 tcg_gen_movi_i64(dest, 0);
2833 save_gpr(ctx, rt, dest);
2834 } else {
2835 save_gpr(ctx, rt, cpu_gr[r1]);
2836 }
2837 ctx->null_cond = cond_make_f();
2838 return true;
2839 }
2840 #ifndef CONFIG_USER_ONLY
2841 /* These are QEMU extensions and are nops in the real architecture:
2842 *
2843 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2844 * or %r31,%r31,%r31 -- death loop; offline cpu
2845 * currently implemented as idle.
2846 */
2847 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
2848 /* No need to check for supervisor, as userland can only pause
2849 until the next timer interrupt. */
2850
2851 set_psw_xb(ctx, 0);
2852
2853 nullify_over(ctx);
2854
2855 /* Advance the instruction queue. */
2856 install_iaq_entries(ctx, &ctx->iaq_b, NULL);
2857 nullify_set(ctx, 0);
2858
2859 /* Tell the qemu main loop to halt until this cpu has work. */
2860 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
2861 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
2862 gen_excp_1(EXCP_HALTED);
2863 ctx->base.is_jmp = DISAS_NORETURN;
2864
2865 return nullify_end(ctx);
2866 }
2867 #endif
2868 }
2869 return do_log_reg(ctx, a, tcg_gen_or_i64);
2870 }
2871
2872 static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
2873 {
2874 return do_log_reg(ctx, a, tcg_gen_xor_i64);
2875 }
2876
2877 static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
2878 {
2879 TCGv_i64 tcg_r1, tcg_r2;
2880
2881 if (a->cf) {
2882 nullify_over(ctx);
2883 }
2884 tcg_r1 = load_gpr(ctx, a->r1);
2885 tcg_r2 = load_gpr(ctx, a->r2);
2886 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
2887 return nullify_end(ctx);
2888 }
2889
2890 static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
2891 {
2892 TCGv_i64 tcg_r1, tcg_r2, dest;
2893
2894 if (a->cf) {
2895 nullify_over(ctx);
2896 }
2897
2898 tcg_r1 = load_gpr(ctx, a->r1);
2899 tcg_r2 = load_gpr(ctx, a->r2);
2900 dest = dest_gpr(ctx, a->t);
2901
2902 tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
2903 save_gpr(ctx, a->t, dest);
2904
2905 ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
2906 return nullify_end(ctx);
2907 }
2908
2909 static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
2910 {
2911 TCGv_i64 tcg_r1, tcg_r2, tmp;
2912
2913 if (a->cf == 0) {
2914 tcg_r2 = load_gpr(ctx, a->r2);
2915 tmp = dest_gpr(ctx, a->t);
2916
2917 if (a->r1 == 0) {
2918 /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
2919 tcg_gen_not_i64(tmp, tcg_r2);
2920 } else {
2921 /*
2922 * Recall that r1 - r2 == r1 + ~r2 + 1.
2923 * Thus r1 + ~r2 == r1 - r2 - 1,
2924 * which does not require an extra temporary.
2925 */
2926 tcg_r1 = load_gpr(ctx, a->r1);
2927 tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
2928 tcg_gen_subi_i64(tmp, tmp, 1);
2929 }
2930 save_gpr(ctx, a->t, tmp);
2931 ctx->null_cond = cond_make_f();
2932 return true;
2933 }
2934
2935 nullify_over(ctx);
2936 tcg_r1 = load_gpr(ctx, a->r1);
2937 tcg_r2 = load_gpr(ctx, a->r2);
2938 tmp = tcg_temp_new_i64();
2939 tcg_gen_not_i64(tmp, tcg_r2);
2940 do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
2941 return nullify_end(ctx);
2942 }
2943
2944 static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
2945 {
2946 return do_uaddcm(ctx, a, false);
2947 }
2948
2949 static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
2950 {
2951 return do_uaddcm(ctx, a, true);
2952 }
2953
2954 static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
2955 {
2956 TCGv_i64 tmp;
2957
2958 nullify_over(ctx);
2959
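/*
 * Decimal correct: PSW[CB] holds the per-nibble carries from a
 * previous binary addition of BCD operands. DCOR subtracts 6 from
 * each digit that produced no carry out, while DCOR,I adds 6 to
 * each digit that did. Multiplying the 0/1 nibble mask by 6
 * applies the correction to all 16 digits at once.
 */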
2960 tmp = tcg_temp_new_i64();
2961 tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
2962 if (!is_i) {
2963 tcg_gen_not_i64(tmp, tmp);
2964 }
2965 tcg_gen_andi_i64(tmp, tmp, (uint64_t)0x1111111111111111ull);
2966 tcg_gen_muli_i64(tmp, tmp, 6);
2967 do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
2968 a->cf, a->d, false, is_i);
2969 return nullify_end(ctx);
2970 }
2971
2972 static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
2973 {
2974 return do_dcor(ctx, a, false);
2975 }
2976
2977 static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
2978 {
2979 return do_dcor(ctx, a, true);
2980 }
2981
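/*
 * DS implements one step of a non-restoring binary divide: the
 * partial remainder in R1 is shifted left one bit, filled from
 * PSW[CB]{8}, and R2 is then added or subtracted as selected by
 * PSW[V] from the previous step.
 */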
2982 static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
2983 {
2984 TCGv_i64 dest, add1, add2, addc, in1, in2;
2985
2986 nullify_over(ctx);
2987
2988 in1 = load_gpr(ctx, a->r1);
2989 in2 = load_gpr(ctx, a->r2);
2990
2991 add1 = tcg_temp_new_i64();
2992 add2 = tcg_temp_new_i64();
2993 addc = tcg_temp_new_i64();
2994 dest = tcg_temp_new_i64();
2995
2996 /* Form R1 << 1 | PSW[CB]{8}. */
2997 tcg_gen_add_i64(add1, in1, in1);
2998 tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));
2999
3000 /*
3001 * Add or subtract R2, depending on PSW[V]. Proper computation of
3002 * carry requires that we subtract via + ~R2 + 1, as described in
3003 * the manual. By extracting and masking V, we can produce the
3004 * proper inputs to the addition without movcond.
3005 */
3006 tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
3007 tcg_gen_xor_i64(add2, in2, addc);
3008 tcg_gen_andi_i64(addc, addc, 1);
3009
3010 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, add1, ctx->zero, add2, ctx->zero);
3011 tcg_gen_add2_i64(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
3012 addc, ctx->zero);
3013
3014 /* Write back the result register. */
3015 save_gpr(ctx, a->t, dest);
3016
3017 /* Write back PSW[CB]. */
3018 tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
3019 tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);
3020
3021 /*
3022 * Write back PSW[V] for the division step.
3023 * Shift cb{8} from where it lives in bit 32 to bit 31,
3024 * so that it overlaps r2{32} in bit 31.
3025 */
3026 tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
3027 tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);
3028
3029 /* Install the new nullification. */
3030 if (a->cf) {
3031 TCGv_i64 sv = NULL, uv = NULL;
3032 if (cond_need_sv(a->cf >> 1)) {
3033 sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
3034 } else if (cond_need_cb(a->cf >> 1)) {
3035 uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
3036 }
3037 ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
3038 }
3039
3040 return nullify_end(ctx);
3041 }
3042
3043 static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
3044 {
3045 return do_add_imm(ctx, a, false, false);
3046 }
3047
3048 static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
3049 {
3050 return do_add_imm(ctx, a, true, false);
3051 }
3052
3053 static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
3054 {
3055 return do_add_imm(ctx, a, false, true);
3056 }
3057
3058 static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
3059 {
3060 return do_add_imm(ctx, a, true, true);
3061 }
3062
3063 static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
3064 {
3065 return do_sub_imm(ctx, a, false);
3066 }
3067
3068 static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
3069 {
3070 return do_sub_imm(ctx, a, true);
3071 }
3072
3073 static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
3074 {
3075 TCGv_i64 tcg_im, tcg_r2;
3076
3077 if (a->cf) {
3078 nullify_over(ctx);
3079 }
3080
3081 tcg_im = tcg_constant_i64(a->i);
3082 tcg_r2 = load_gpr(ctx, a->r);
3083 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);
3084
3085 return nullify_end(ctx);
3086 }
3087
3088 static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
3089 void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
3090 {
3091 TCGv_i64 r1, r2, dest;
3092
3093 if (!ctx->is_pa20) {
3094 return false;
3095 }
3096
3097 nullify_over(ctx);
3098
3099 r1 = load_gpr(ctx, a->r1);
3100 r2 = load_gpr(ctx, a->r2);
3101 dest = dest_gpr(ctx, a->t);
3102
3103 fn(dest, r1, r2);
3104 save_gpr(ctx, a->t, dest);
3105
3106 return nullify_end(ctx);
3107 }
3108
3109 static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
3110 void (*fn)(TCGv_i64, TCGv_i64, int64_t))
3111 {
3112 TCGv_i64 r, dest;
3113
3114 if (!ctx->is_pa20) {
3115 return false;
3116 }
3117
3118 nullify_over(ctx);
3119
3120 r = load_gpr(ctx, a->r);
3121 dest = dest_gpr(ctx, a->t);
3122
3123 fn(dest, r, a->i);
3124 save_gpr(ctx, a->t, dest);
3125
3126 return nullify_end(ctx);
3127 }
3128
3129 static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
3130 void (*fn)(TCGv_i64, TCGv_i64,
3131 TCGv_i64, TCGv_i32))
3132 {
3133 TCGv_i64 r1, r2, dest;
3134
3135 if (!ctx->is_pa20) {
3136 return false;
3137 }
3138
3139 nullify_over(ctx);
3140
3141 r1 = load_gpr(ctx, a->r1);
3142 r2 = load_gpr(ctx, a->r2);
3143 dest = dest_gpr(ctx, a->t);
3144
3145 fn(dest, r1, r2, tcg_constant_i32(a->sh));
3146 save_gpr(ctx, a->t, dest);
3147
3148 return nullify_end(ctx);
3149 }
3150
3151 static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
3152 {
3153 return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
3154 }
3155
3156 static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
3157 {
3158 return do_multimedia(ctx, a, gen_helper_hadd_ss);
3159 }
3160
3161 static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
3162 {
3163 return do_multimedia(ctx, a, gen_helper_hadd_us);
3164 }
3165
3166 static bool trans_havg(DisasContext *ctx, arg_rrr *a)
3167 {
3168 return do_multimedia(ctx, a, gen_helper_havg);
3169 }
3170
3171 static bool trans_hshl(DisasContext *ctx, arg_rri *a)
3172 {
3173 return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
3174 }
3175
3176 static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
3177 {
3178 return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
3179 }
3180
3181 static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
3182 {
3183 return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
3184 }
3185
3186 static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
3187 {
3188 return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
3189 }
3190
3191 static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
3192 {
3193 return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
3194 }
3195
3196 static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
3197 {
3198 return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
3199 }
3200
3201 static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
3202 {
3203 return do_multimedia(ctx, a, gen_helper_hsub_ss);
3204 }
3205
3206 static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
3207 {
3208 return do_multimedia(ctx, a, gen_helper_hsub_us);
3209 }
3210
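/*
 * MIXH,L assembles the result from the leftmost halfword of each
 * 32-bit word of r1 and r2: { r1.h0, r2.h0, r1.h2, r2.h2 }.
 */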
3211 static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3212 {
3213 uint64_t mask = 0xffff0000ffff0000ull;
3214 TCGv_i64 tmp = tcg_temp_new_i64();
3215
3216 tcg_gen_andi_i64(tmp, r2, mask);
3217 tcg_gen_andi_i64(dst, r1, mask);
3218 tcg_gen_shri_i64(tmp, tmp, 16);
3219 tcg_gen_or_i64(dst, dst, tmp);
3220 }
3221
3222 static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
3223 {
3224 return do_multimedia(ctx, a, gen_mixh_l);
3225 }
3226
3227 static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3228 {
3229 uint64_t mask = 0x0000ffff0000ffffull;
3230 TCGv_i64 tmp = tcg_temp_new_i64();
3231
3232 tcg_gen_andi_i64(tmp, r1, mask);
3233 tcg_gen_andi_i64(dst, r2, mask);
3234 tcg_gen_shli_i64(tmp, tmp, 16);
3235 tcg_gen_or_i64(dst, dst, tmp);
3236 }
3237
3238 static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
3239 {
3240 return do_multimedia(ctx, a, gen_mixh_r);
3241 }
3242
3243 static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3244 {
3245 TCGv_i64 tmp = tcg_temp_new_i64();
3246
3247 tcg_gen_shri_i64(tmp, r2, 32);
3248 tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
3249 }
3250
3251 static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
3252 {
3253 return do_multimedia(ctx, a, gen_mixw_l);
3254 }
3255
3256 static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
3257 {
3258 tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
3259 }
3260
3261 static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
3262 {
3263 return do_multimedia(ctx, a, gen_mixw_r);
3264 }
3265
3266 static bool trans_permh(DisasContext *ctx, arg_permh *a)
3267 {
3268 TCGv_i64 r, t0, t1, t2, t3;
3269
3270 if (!ctx->is_pa20) {
3271 return false;
3272 }
3273
3274 nullify_over(ctx);
3275
3276 r = load_gpr(ctx, a->r1);
3277 t0 = tcg_temp_new_i64();
3278 t1 = tcg_temp_new_i64();
3279 t2 = tcg_temp_new_i64();
3280 t3 = tcg_temp_new_i64();
3281
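/*
 * Halfword 0 is the most significant, so selector cN maps to bit
 * offset (3 - cN) * 16; the deposits below then reassemble the
 * four selected halfwords with c0 leftmost.
 */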
3282 tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
3283 tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
3284 tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
3285 tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);
3286
3287 tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
3288 tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
3289 tcg_gen_deposit_i64(t0, t2, t0, 32, 32);
3290
3291 save_gpr(ctx, a->t, t0);
3292 return nullify_end(ctx);
3293 }
3294
3295 static bool trans_ld(DisasContext *ctx, arg_ldst *a)
3296 {
3297 if (ctx->is_pa20) {
3298 /*
3299 * With pa20, LDB, LDH, LDW, LDD to %r0 are prefetches.
3300 * Any base modification still occurs.
3301 */
3302 if (a->t == 0) {
3303 return trans_nop_addrx(ctx, a);
3304 }
3305 } else if (a->size > MO_32) {
3306 return gen_illegal(ctx);
3307 }
3308 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
3309 a->disp, a->sp, a->m, a->size | MO_TE);
3310 }
3311
3312 static bool trans_st(DisasContext *ctx, arg_ldst *a)
3313 {
3314 assert(a->x == 0 && a->scale == 0);
3315 if (!ctx->is_pa20 && a->size > MO_32) {
3316 return gen_illegal(ctx);
3317 }
3318 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
3319 }
3320
3321 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
3322 {
3323 MemOp mop = MO_TE | MO_ALIGN | a->size;
3324 TCGv_i64 dest, ofs;
3325 TCGv_i64 addr;
3326
3327 if (!ctx->is_pa20 && a->size > MO_32) {
3328 return gen_illegal(ctx);
3329 }
3330
3331 nullify_over(ctx);
3332
3333 if (a->m) {
3334 /* Base register modification. Make sure that if RT == RB,
3335 we still see the result of the load. */
3336 dest = tcg_temp_new_i64();
3337 } else {
3338 dest = dest_gpr(ctx, a->t);
3339 }
3340
3341 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
3342 a->disp, a->sp, a->m, MMU_DISABLED(ctx));
3343
3344 /*
3345 * For hppa1.1, LDCW is undefined unless aligned mod 16.
3346 * However actual hardware succeeds with aligned mod 4.
3347 * Detect this case and log a GUEST_ERROR.
3348 *
3349 * TODO: HPPA64 relaxes the over-alignment requirement
3350 * with the ,co completer.
3351 */
3352 gen_helper_ldc_check(addr);
3353
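/* LDCW loads the old value and zeroes the memory word: an atomic
   exchange with zero. */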
3354 tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);
3355
3356 if (a->m) {
3357 save_gpr(ctx, a->b, ofs);
3358 }
3359 save_gpr(ctx, a->t, dest);
3360
3361 return nullify_end(ctx);
3362 }
3363
3364 static bool trans_stby(DisasContext *ctx, arg_stby *a)
3365 {
3366 TCGv_i64 ofs, val;
3367 TCGv_i64 addr;
3368
3369 nullify_over(ctx);
3370
3371 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3372 MMU_DISABLED(ctx));
3373 val = load_gpr(ctx, a->r);
3374 if (a->a) {
3375 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3376 gen_helper_stby_e_parallel(tcg_env, addr, val);
3377 } else {
3378 gen_helper_stby_e(tcg_env, addr, val);
3379 }
3380 } else {
3381 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3382 gen_helper_stby_b_parallel(tcg_env, addr, val);
3383 } else {
3384 gen_helper_stby_b(tcg_env, addr, val);
3385 }
3386 }
3387 if (a->m) {
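/* Base register modification for STBY uses the word-aligned address. */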
3388 tcg_gen_andi_i64(ofs, ofs, ~3);
3389 save_gpr(ctx, a->b, ofs);
3390 }
3391
3392 return nullify_end(ctx);
3393 }
3394
3395 static bool trans_stdby(DisasContext *ctx, arg_stby *a)
3396 {
3397 TCGv_i64 ofs, val;
3398 TCGv_i64 addr;
3399
3400 if (!ctx->is_pa20) {
3401 return false;
3402 }
3403 nullify_over(ctx);
3404
3405 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
3406 MMU_DISABLED(ctx));
3407 val = load_gpr(ctx, a->r);
3408 if (a->a) {
3409 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3410 gen_helper_stdby_e_parallel(tcg_env, addr, val);
3411 } else {
3412 gen_helper_stdby_e(tcg_env, addr, val);
3413 }
3414 } else {
3415 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3416 gen_helper_stdby_b_parallel(tcg_env, addr, val);
3417 } else {
3418 gen_helper_stdby_b(tcg_env, addr, val);
3419 }
3420 }
3421 if (a->m) {
3422 tcg_gen_andi_i64(ofs, ofs, ~7);
3423 save_gpr(ctx, a->b, ofs);
3424 }
3425
3426 return nullify_end(ctx);
3427 }
3428
3429 static bool trans_lda(DisasContext *ctx, arg_ldst *a)
3430 {
3431 int hold_mmu_idx = ctx->mmu_idx;
3432
3433 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3434 ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3435 trans_ld(ctx, a);
3436 ctx->mmu_idx = hold_mmu_idx;
3437 return true;
3438 }
3439
3440 static bool trans_sta(DisasContext *ctx, arg_ldst *a)
3441 {
3442 int hold_mmu_idx = ctx->mmu_idx;
3443
3444 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
3445 ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
3446 trans_st(ctx, a);
3447 ctx->mmu_idx = hold_mmu_idx;
3448 return true;
3449 }
3450
3451 static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
3452 {
3453 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3454
3455 tcg_gen_movi_i64(tcg_rt, a->i);
3456 save_gpr(ctx, a->t, tcg_rt);
3457 ctx->null_cond = cond_make_f();
3458 return true;
3459 }
3460
3461 static bool trans_addil(DisasContext *ctx, arg_addil *a)
3462 {
3463 TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
3464 TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);
3465
3466 tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
3467 save_gpr(ctx, 1, tcg_r1);
3468 ctx->null_cond = cond_make_f();
3469 return true;
3470 }
3471
3472 static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
3473 {
3474 TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);
3475
3476 /* Special case rb == 0, for the LDI pseudo-op.
3477 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
3478 if (a->b == 0) {
3479 tcg_gen_movi_i64(tcg_rt, a->i);
3480 } else {
3481 tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
3482 }
3483 save_gpr(ctx, a->t, tcg_rt);
3484 ctx->null_cond = cond_make_f();
3485 return true;
3486 }
3487
3488 static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3489 unsigned c, unsigned f, bool d, unsigned n, int disp)
3490 {
3491 TCGv_i64 dest, in2, sv;
3492 DisasCond cond;
3493
3494 in2 = load_gpr(ctx, r);
3495 dest = tcg_temp_new_i64();
3496
3497 tcg_gen_sub_i64(dest, in1, in2);
3498
3499 sv = NULL;
3500 if (cond_need_sv(c)) {
3501 sv = do_sub_sv(ctx, dest, in1, in2);
3502 }
3503
3504 cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
3505 return do_cbranch(ctx, disp, n, &cond);
3506 }
3507
3508 static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
3509 {
3510 if (!ctx->is_pa20 && a->d) {
3511 return false;
3512 }
3513 nullify_over(ctx);
3514 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
3515 a->c, a->f, a->d, a->n, a->disp);
3516 }
3517
3518 static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3519 {
3520 if (!ctx->is_pa20 && a->d) {
3521 return false;
3522 }
3523 nullify_over(ctx);
3524 return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
3525 a->c, a->f, a->d, a->n, a->disp);
3526 }
3527
3528 static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
3529 unsigned c, unsigned f, unsigned n, int disp)
3530 {
3531 TCGv_i64 dest, in2, sv, cb_cond;
3532 DisasCond cond;
3533 bool d = false;
3534
3535 /*
3536 * For hppa64, the ADDB conditions change with PSW.W,
3537 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3538 */
3539 if (ctx->tb_flags & PSW_W) {
3540 d = c >= 5;
3541 if (d) {
3542 c &= 3;
3543 }
3544 }
3545
3546 in2 = load_gpr(ctx, r);
3547 dest = tcg_temp_new_i64();
3548 sv = NULL;
3549 cb_cond = NULL;
3550
3551 if (cond_need_cb(c)) {
3552 TCGv_i64 cb = tcg_temp_new_i64();
3553 TCGv_i64 cb_msb = tcg_temp_new_i64();
3554
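/* Recover the per-bit carries: in1 ^ in2 ^ dest is the carry into
   each bit, and cb_msb is the carry out of bit 63. */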
3555 tcg_gen_movi_i64(cb_msb, 0);
3556 tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
3557 tcg_gen_xor_i64(cb, in1, in2);
3558 tcg_gen_xor_i64(cb, cb, dest);
3559 cb_cond = get_carry(ctx, d, cb, cb_msb);
3560 } else {
3561 tcg_gen_add_i64(dest, in1, in2);
3562 }
3563 if (cond_need_sv(c)) {
3564 sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
3565 }
3566
3567 cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
3568 save_gpr(ctx, r, dest);
3569 return do_cbranch(ctx, disp, n, &cond);
3570 }
3571
3572 static bool trans_addb(DisasContext *ctx, arg_addb *a)
3573 {
3574 nullify_over(ctx);
3575 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3576 }
3577
3578 static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3579 {
3580 nullify_over(ctx);
3581 return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
3582 }
3583
3584 static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
3585 {
3586 TCGv_i64 tmp, tcg_r;
3587 DisasCond cond;
3588
3589 nullify_over(ctx);
3590
3591 tmp = tcg_temp_new_i64();
3592 tcg_r = load_gpr(ctx, a->r);
3593 if (a->d) {
3594 tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
3595 } else {
3596 /* Force shift into [32,63] */
3597 tcg_gen_ori_i64(tmp, cpu_sar, 32);
3598 tcg_gen_shl_i64(tmp, tcg_r, tmp);
3599 }
3600
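/* The selected bit is now in the sign position; test its sign. */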
3601 cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
3602 return do_cbranch(ctx, a->disp, a->n, &cond);
3603 }
3604
3605 static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3606 {
3607 DisasCond cond;
3608 int p = a->p | (a->d ? 0 : 32);
3609
3610 nullify_over(ctx);
3611 cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
3612 load_gpr(ctx, a->r), 1ull << (63 - p));
3613 return do_cbranch(ctx, a->disp, a->n, &cond);
3614 }
3615
3616 static bool trans_movb(DisasContext *ctx, arg_movb *a)
3617 {
3618 TCGv_i64 dest;
3619 DisasCond cond;
3620
3621 nullify_over(ctx);
3622
3623 dest = dest_gpr(ctx, a->r2);
3624 if (a->r1 == 0) {
3625 tcg_gen_movi_i64(dest, 0);
3626 } else {
3627 tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
3628 }
3629
3630 /* All MOVB conditions are 32-bit. */
3631 cond = do_sed_cond(ctx, a->c, false, dest);
3632 return do_cbranch(ctx, a->disp, a->n, &cond);
3633 }
3634
3635 static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3636 {
3637 TCGv_i64 dest;
3638 DisasCond cond;
3639
3640 nullify_over(ctx);
3641
3642 dest = dest_gpr(ctx, a->r);
3643 tcg_gen_movi_i64(dest, a->i);
3644
3645 /* All MOVBI conditions are 32-bit. */
3646 cond = do_sed_cond(ctx, a->c, false, dest);
3647 return do_cbranch(ctx, a->disp, a->n, &cond);
3648 }
3649
3650 static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
3651 {
3652 TCGv_i64 dest, src2;
3653
3654 if (!ctx->is_pa20 && a->d) {
3655 return false;
3656 }
3657 if (a->c) {
3658 nullify_over(ctx);
3659 }
3660
3661 dest = dest_gpr(ctx, a->t);
3662 src2 = load_gpr(ctx, a->r2);
3663 if (a->r1 == 0) {
3664 if (a->d) {
3665 tcg_gen_shr_i64(dest, src2, cpu_sar);
3666 } else {
3667 TCGv_i64 tmp = tcg_temp_new_i64();
3668
3669 tcg_gen_ext32u_i64(dest, src2);
3670 tcg_gen_andi_i64(tmp, cpu_sar, 31);
3671 tcg_gen_shr_i64(dest, dest, tmp);
3672 }
3673 } else if (a->r1 == a->r2) {
3674 if (a->d) {
3675 tcg_gen_rotr_i64(dest, src2, cpu_sar);
3676 } else {
3677 TCGv_i32 t32 = tcg_temp_new_i32();
3678 TCGv_i32 s32 = tcg_temp_new_i32();
3679
3680 tcg_gen_extrl_i64_i32(t32, src2);
3681 tcg_gen_extrl_i64_i32(s32, cpu_sar);
3682 tcg_gen_andi_i32(s32, s32, 31);
3683 tcg_gen_rotr_i32(t32, t32, s32);
3684 tcg_gen_extu_i32_i64(dest, t32);
3685 }
3686 } else {
3687 TCGv_i64 src1 = load_gpr(ctx, a->r1);
3688
3689 if (a->d) {
3690 TCGv_i64 t = tcg_temp_new_i64();
3691 TCGv_i64 n = tcg_temp_new_i64();
3692
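/* Double-word funnel shift: dest = (src1:src2) >> SAR. Shifting
   src1 by 63 - SAR and then by 1 more avoids an undefined shift
   by 64 when SAR is 0. */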
3693 tcg_gen_xori_i64(n, cpu_sar, 63);
3694 tcg_gen_shl_i64(t, src1, n);
3695 tcg_gen_shli_i64(t, t, 1);
3696 tcg_gen_shr_i64(dest, src2, cpu_sar);
3697 tcg_gen_or_i64(dest, dest, t);
3698 } else {
3699 TCGv_i64 t = tcg_temp_new_i64();
3700 TCGv_i64 s = tcg_temp_new_i64();
3701
3702 tcg_gen_concat32_i64(t, src2, src1);
3703 tcg_gen_andi_i64(s, cpu_sar, 31);
3704 tcg_gen_shr_i64(dest, t, s);
3705 }
3706 }
3707 save_gpr(ctx, a->t, dest);
3708
3709 /* Install the new nullification. */
3710 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3711 return nullify_end(ctx);
3712 }
3713
3714 static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
3715 {
3716 unsigned width, sa;
3717 TCGv_i64 dest, t2;
3718
3719 if (!ctx->is_pa20 && a->d) {
3720 return false;
3721 }
3722 if (a->c) {
3723 nullify_over(ctx);
3724 }
3725
3726 width = a->d ? 64 : 32;
3727 sa = width - 1 - a->cpos;
3728
3729 dest = dest_gpr(ctx, a->t);
3730 t2 = load_gpr(ctx, a->r2);
3731 if (a->r1 == 0) {
3732 tcg_gen_extract_i64(dest, t2, sa, width - sa);
3733 } else if (width == TARGET_LONG_BITS) {
3734 tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
3735 } else {
3736 assert(!a->d);
3737 if (a->r1 == a->r2) {
3738 TCGv_i32 t32 = tcg_temp_new_i32();
3739 tcg_gen_extrl_i64_i32(t32, t2);
3740 tcg_gen_rotri_i32(t32, t32, sa);
3741 tcg_gen_extu_i32_i64(dest, t32);
3742 } else {
3743 tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
3744 tcg_gen_extract_i64(dest, dest, sa, 32);
3745 }
3746 }
3747 save_gpr(ctx, a->t, dest);
3748
3749 /* Install the new nullification. */
3750 ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
3751 return nullify_end(ctx);
3752 }
3753
trans_extr_sar(DisasContext * ctx,arg_extr_sar * a)3754 static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
3755 {
3756 unsigned widthm1 = a->d ? 63 : 31;
3757 TCGv_i64 dest, src, tmp;
3758
3759 if (!ctx->is_pa20 && a->d) {
3760 return false;
3761 }
3762 if (a->c) {
3763 nullify_over(ctx);
3764 }
3765
3766 dest = dest_gpr(ctx, a->t);
3767 src = load_gpr(ctx, a->r);
3768 tmp = tcg_temp_new_i64();
3769
    /* Recall that SAR uses big-endian bit numbering. */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);
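    /* tmp now holds widthm1 - (SAR & widthm1): the right-shift count. */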

    if (a->se) {
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    cpos = width - 1 - a->pos;
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
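    /*
     * mask0 is the immediate deposited into zeros, mask1 the immediate
     * deposited into ones, so (src & mask1) | mask0 deposits the
     * immediate into src.
     */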
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
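    /* The nz variant deposits into the old value of rt, otherwise into zero. */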
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}

static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

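    /* msb + (msb - 1) is the len-bit mask (1 << len) - 1, computed
       this way to avoid an out-of-range shift if len == 64. */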
    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    return nullify_end(ctx);
}

static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}

static bool trans_be(DisasContext *ctx, arg_be *a)
{
#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = tcg_temp_new_i64();
    load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif

    ctx->iaq_j.base = tcg_temp_new_i64();
    ctx->iaq_j.disp = 0;

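    /*
     * The target offset is GR[b] + disp; do_ibranch_priv (defined
     * earlier) adjusts the privilege encoded in its low two bits so
     * the branch cannot raise privilege.
     */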
    tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
    ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);

    return do_ibranch(ctx, a->l, true, a->n);
}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, a->disp, a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    int64_t disp = a->disp;
    bool indirect = false;

    /* Trap if PSW[B] is set. */
    if (ctx->psw_xb & PSW_B) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

#ifndef CONFIG_USER_ONLY
    if (ctx->privilege == 0) {
        /* Privilege cannot decrease. */
    } else if (!(ctx->tb_flags & PSW_C)) {
        /* With paging disabled, priv becomes 0. */
        disp -= ctx->privilege;
    } else {
        /* Adjust the dest offset for the privilege change from the PTE. */
        TCGv_i64 off = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, off, &ctx->iaq_f);
        gen_helper_b_gate_priv(off, tcg_env, off);

        ctx->iaq_j.base = off;
        ctx->iaq_j.disp = disp + 8;
        indirect = true;
    }
#endif

    if (a->l) {
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    if (indirect) {
        return do_ibranch(ctx, 0, false, a->n);
    }
    return do_dbranch(ctx, disp, 0, a->n);
}

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* The computation here never changes privilege level. */
        copy_iaoq_entry(ctx, t0, &next);
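        /* The branch target is IAQ_Front + 8 + GR[x] * 8. */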
        tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(t0, t0, t1);

        ctx->iaq_j = iaqe_next_absv(ctx, t0);
        return do_ibranch(ctx, a->l, false, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX. */
        return do_dbranch(ctx, 0, a->l, a->n);
    }
}

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
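        /* Branch vectored: the target is GR[b] + GR[x] * 8. */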
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    ctx->iaq_j = iaqe_next_absv(ctx, dest);

    return do_ibranch(ctx, 0, false, a->n);
}

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 b = load_gpr(ctx, a->b);

#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
    ctx->iaq_j.base = do_ibranch_priv(ctx, b);
    ctx->iaq_j.disp = 0;

    return do_ibranch(ctx, a->l, false, a->n);
}

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions are implemented as nops. */
    return ctx->is_pa20;
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGCond tc = TCG_COND_TSTNE;
    uint32_t mask;
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        switch (a->c) {
        case 0: /* simple */
            mask = R_FPSR_C_MASK;
            break;
        case 2: /* rej */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 1: /* acc */
            mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
            break;
        case 6: /* rej8 */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 5: /* acc8 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
            break;
        case 9: /* acc6 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
            break;
        case 13: /* acc4 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
            break;
        case 17: /* acc2 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;
        mask = R_FPSR_CA0_MASK >> cbit;
    }

    ctx->null_cond = cond_make_ti(tc, t, mask);
    return nullify_end(ctx);
}

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

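    /*
     * XMPYU: unsigned 32-bit multiply with a full 64-bit result;
     * the 32-bit sources are zero-extended by load_frw0_i64.
     */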
    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}

/* Convert the fmpyadd single-precision register encodings to standard. */
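/* Encodings 0-15 map to registers 16-31; encodings 16-31 map to 48-63,
   the same registers with bit 5 set to select the other word half. */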
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}

/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}

static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}

static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}

static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    TCGv_i64 dest = dest_gpr(ctx, a->rt);
    tcg_gen_ld_i64(dest, tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
    save_gpr(ctx, a->rt, dest);
    return nullify_end(ctx);
}

static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
                   offsetof(CPUHPPAState, dr[a->dr]));
#ifndef CONFIG_USER_ONLY
    if (ctx->is_pa20 && (a->dr == 2)) {
        /* Update gva_offset_mask from the new value of %dr2 */
        gen_helper_update_gva_offset_mask(tcg_env);
        /* Exit to capture the new value for the next TB. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    }
#endif
    return nullify_end(ctx);
}

static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t cs_base;
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
    ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
    ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = PRIV_USER;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
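    /*
     * With PSW_D clear, data addresses are absolute: use an absolute
     * MMU index, wide or narrow according to PSW_W.
     */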
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
#endif

    cs_base = ctx->base.tb->cs_base;
    ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;

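    /*
     * Decode IAQ_Back from cs_base: a back entry in a different space
     * or page must come from the runtime globals; otherwise only the
     * constant page-offset delta is recorded.
     */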
    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
        ctx->iaq_b.space = cpu_iasq_b;
        ctx->iaq_b.base = cpu_iaoq_b;
    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
        ctx->iaq_b.base = cpu_iaoq_b;
    } else {
        uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
        ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
    }

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t iaoq_f, iaoq_b;
    int64_t diff;

    tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));

    iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
    if (iaqe_variable(&ctx->iaq_b)) {
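        /* IAQ_Back is not at a constant offset; record a sentinel. */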
        diff = INT32_MIN;
    } else {
        iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
        diff = iaoq_b - iaoq_f;
        /* Direct branches can only produce a 24-bit displacement. */
        tcg_debug_assert(diff == (int32_t)diff);
        tcg_debug_assert(diff != INT32_MIN);
    }

    tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
    ctx->insn_start_updated = false;
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /*
         * Set up the IA queue for the next insn.
         * This will be overwritten by a branch.
         */
        ctx->iaq_n = NULL;
        memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
        ctx->psw_b_next = false;

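        /* A statically nullified insn: consume PSW[N] and skip decode. */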
        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }

        if (ret != DISAS_NORETURN) {
            set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
        }
    }

    /* If the TranslationBlock must end, do so. */
    ctx->base.pc_next += 4;
    if (ret != DISAS_NEXT) {
        return;
    }
    /* Note this also detects a privilege change. */
    if (iaqe_variable(&ctx->iaq_b)
        || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        return;
    }

    /*
     * Advance the insn queue.
     * The only exit now is DISAS_TOO_MANY from the translator loop.
     */
    ctx->iaq_f.disp = ctx->iaq_b.disp;
    if (!ctx->iaq_n) {
        ctx->iaq_b.disp += 4;
        return;
    }
    /*
     * If IAQ_Next is variable in any way, we need to copy into the
     * IAQ_Back globals, in case the next insn raises an exception.
     */
    if (ctx->iaq_n->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
        ctx->iaq_b.base = cpu_iaoq_b;
        ctx->iaq_b.disp = 0;
    } else {
        ctx->iaq_b.disp = ctx->iaq_n->disp;
    }
    if (ctx->iaq_n->space) {
        tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
        ctx->iaq_b.space = cpu_iasq_b;
    }
}

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    /* Assume the insn queue has not been advanced. */
    DisasIAQE *f = &ctx->iaq_b;
    DisasIAQE *b = ctx->iaq_n;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        /* The insn queue has been advanced. */
        f = &ctx->iaq_f;
        b = &ctx->iaq_b;
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE:
        if (use_goto_tb(ctx, f, b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, f, b);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE_EXIT:
        install_iaq_entries(ctx, f, b);
        nullify_save(ctx);
        if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }

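    /*
     * Emit the delayed exception paths: each restores PSW[N] and IIR
     * as requested, reinstalls the faulting insn's IA queue, and
     * raises the exception.
     */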
    for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);
        if (e->set_n >= 0) {
            tcg_gen_movi_i64(cpu_psw_n, e->set_n);
        }
        if (e->set_iir) {
            tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
                           offsetof(CPUHPPAState, cr[CR_IIR]));
        }
        install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
        gen_excp_1(e->excp);
    }
}

#ifdef CONFIG_USER_ONLY
static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return true;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return true;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return true;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return true;
    }
    return false;
}
#endif

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start = hppa_tr_tb_start,
    .insn_start = hppa_tr_insn_start,
    .translate_insn = hppa_tr_translate_insn,
    .tb_stop = hppa_tr_tb_stop,
#ifdef CONFIG_USER_ONLY
    .disas_log = hppa_tr_disas_log,
#endif
};

void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
                         int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}