1 /*
2 * PowerPC emulation for qemu: main translation routines.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "qemu/host-utils.h"
28
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31
32 #include "exec/translator.h"
33 #include "exec/translation-block.h"
34 #include "exec/log.h"
35 #include "qemu/atomic128.h"
36 #include "spr_common.h"
37 #include "power8-pmu.h"
38
39 #include "qemu/qemu-print.h"
40 #include "qapi/error.h"
41
42 #define HELPER_H "helper.h"
43 #include "exec/helper-info.c.inc"
44 #undef HELPER_H
45
46 #define CPU_SINGLE_STEP 0x1
47 #define CPU_BRANCH_STEP 0x2
48
49 /* Include definitions for instructions classes and implementations flags */
50 /* #define PPC_DEBUG_DISAS */
51
52 #ifdef PPC_DEBUG_DISAS
53 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
54 #else
55 # define LOG_DISAS(...) do { } while (0)
56 #endif
57 /*****************************************************************************/
58 /* Code translation helpers */
59
/*
 * Global register indexes: TCG globals mirroring CPUPPCState fields.
 * cpu_reg_names is the backing storage for the register names passed to
 * tcg_global_mem_new*(); its size accounts for one- vs two-digit GPR
 * numbers ("r0".."r9" vs "r10".."r31"), the SPE high halves ("rNH") and
 * the CR fields ("crf0".."crf7"), each name including its trailing NUL.
 */
static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5 /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];
static TCGv_i32 cpu_crf[8];
static TCGv cpu_nip;
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
/* XER is kept split: the SO/OV/CA (and ISA v3.0 OV32/CA32) bits live in
 * their own globals, the remaining bits in cpu_xer. */
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
#if defined(TARGET_PPC64)
static TCGv cpu_reserve_val2;
#endif
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
83
/*
 * Allocate the TCG globals that shadow CPUPPCState fields (CR fields,
 * GPRs and their SPE high halves, and the special registers above).
 * Called once when the PowerPC translator is initialised.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* "crf0".."crf7": 5 bytes each including the NUL */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* "rN" is 3 (4) bytes and "rNH" 4 (5) bytes for N < 10 (>= 10) */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    cpu_xer = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(tcg_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
#if defined(TARGET_PPC64)
    cpu_reserve_val2 = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");
#endif

    cpu_fpscr = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(tcg_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
167
/* internal defines */
/* Per-TB translation state for the PowerPC frontend. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;   /* instruction word being translated */
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int access_type;   /* last value stored via gen_set_access_type() */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    powerpc_excp_t excp_model;
    bool sf_mode;      /* 64-bit mode; see NARROW_MODE() */
    bool has_cfar;
    bool has_bhrb;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    bool bhrb_enable;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
206
207 #define DISAS_EXIT DISAS_TARGET_0 /* exit to main loop, pc updated */
208 #define DISAS_EXIT_UPDATE DISAS_TARGET_1 /* exit to main loop, pc stale */
209 #define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */
210 #define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */
211
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
    /*
     * A swap is needed whenever the guest's current endianness (MSR:LE)
     * differs from the endianness this target was built for.
     */
#if TARGET_BIG_ENDIAN
    return ctx->le_mode;
#else
    return !ctx->le_mode;
#endif
}
221
222 /* True when active word size < size of target_long. */
223 #ifdef TARGET_PPC64
224 # define NARROW_MODE(C) (!(C)->sf_mode)
225 #else
226 # define NARROW_MODE(C) 0
227 #endif
228
/* Dispatch-table entry describing one legacy-decoder opcode. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
241
gen_serialize(DisasContext * ctx)242 static inline bool gen_serialize(DisasContext *ctx)
243 {
244 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
245 /* Restart with exclusive lock. */
246 gen_helper_exit_atomic(tcg_env);
247 ctx->base.is_jmp = DISAS_NORETURN;
248 return false;
249 }
250 return true;
251 }
252
253 #if !defined(CONFIG_USER_ONLY)
254 #if defined(TARGET_PPC64)
gen_serialize_core(DisasContext * ctx)255 static inline bool gen_serialize_core(DisasContext *ctx)
256 {
257 if (ctx->flags & POWERPC_FLAG_SMT) {
258 return gen_serialize(ctx);
259 }
260 return true;
261 }
262 #endif
263
gen_serialize_core_lpar(DisasContext * ctx)264 static inline bool gen_serialize_core_lpar(DisasContext *ctx)
265 {
266 #if defined(TARGET_PPC64)
267 if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
268 return gen_serialize(ctx);
269 }
270 #endif
271 return true;
272 }
273 #endif
274
/* SPR load/store helpers */

/* Emit a load of env->spr[reg] into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}

/* Emit a store of TCG value t into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
285
/*
 * Record the type of the upcoming memory access in env->access_type,
 * skipping the store when this TB already set the same value.
 */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}
293
/*
 * Emit a store of nip into the architected PC; in narrow (32-bit)
 * mode the address is truncated to 32 bits first.
 */
static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
{
    target_ulong dest = NARROW_MODE(ctx) ? (uint32_t)nip : nip;

    tcg_gen_movi_tl(cpu_nip, dest);
}
301
/*
 * Raise exception excp with error code error, pointing the architected
 * PC at nip first.  Ends the TB (DISAS_NORETURN).
 */
static void gen_exception_err_nip(DisasContext *ctx, uint32_t excp,
                                  uint32_t error, target_ulong nip)
{
    TCGv_i32 t0, t1;

    gen_update_nip(ctx, nip);
    t0 = tcg_constant_i32(excp);
    t1 = tcg_constant_i32(error);
    gen_helper_raise_exception_err(tcg_env, t0, t1);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Raise excp/error at the current instruction address. */
static inline void gen_exception_err(DisasContext *ctx, uint32_t excp,
                                     uint32_t error)
{
    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_exception_err_nip(ctx, excp, error, ctx->cia);
}
323
/*
 * Raise exception excp (no error code), pointing the architected PC at
 * nip first.  Ends the TB (DISAS_NORETURN).
 */
static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
                              target_ulong nip)
{
    TCGv_i32 t0;

    gen_update_nip(ctx, nip);
    t0 = tcg_constant_i32(excp);
    gen_helper_raise_exception(tcg_env, t0);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Raise excp at the current instruction address. */
static inline void gen_exception(DisasContext *ctx, uint32_t excp)
{
    /*
     * These are all synchronous exceptions, we set the PC back to the
     * faulting instruction
     */
    gen_exception_nip(ctx, excp, ctx->cia);
}
343
344 #if !defined(CONFIG_USER_ONLY)
/* Have the CPU re-evaluate pending interrupts (system emulation only). */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(tcg_env);
}
350 #endif
351
352 /*
353 * Tells the caller what is the appropriate exception to generate and prepares
354 * SPR registers for this exception.
355 *
356 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
357 * POWERPC_EXCP_DEBUG (on BookE).
358 */
static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
{
#if !defined(CONFIG_USER_ONLY)
    if (ctx->flags & POWERPC_FLAG_DE) {
        /* BookE: record the event in DBSR, then raise EXCP_DEBUG. */
        target_ulong dbsr = 0;
        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
            dbsr = DBCR0_ICMP;
        } else {
            /* Must have been branch */
            dbsr = DBCR0_BRT;
        }
        TCGv t0 = tcg_temp_new();
        gen_load_spr(t0, SPR_BOOKE_DBSR);
        tcg_gen_ori_tl(t0, t0, dbsr);
        gen_store_spr(SPR_BOOKE_DBSR, t0);
        gen_helper_raise_exception(tcg_env,
                                   tcg_constant_i32(POWERPC_EXCP_DEBUG));
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        /* BookS: trace exception, passing the faulting address. */
        if (!rfi_type) { /* BookS does not single step rfi type instructions */
            TCGv t0 = tcg_temp_new();
            tcg_gen_movi_tl(t0, ctx->cia);
            gen_helper_book3s_trace(tcg_env, t0);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    }
#endif
}
387
/* Raise an illegal-instruction style program interrupt. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privileged-instruction program interrupt. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Raise a hypervisor-privilege fault (HV emulation assist). */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
404
405 /*****************************************************************************/
406 /* SPR READ/WRITE CALLBACKS */
407
/*
 * Callback for SPRs with no access in the current context: does
 * nothing (the diagnostic printf is compiled out).
 */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
415
416 /* #define PPC_DUMP_SPR_ACCESSES */
417
418 /*
419 * Generic callbacks:
420 * do nothing but store/retrieve spr value
421 */
/* Optionally trace an SPR load (only when PPC_DUMP_SPR_ACCESSES is set). */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(tcg_env, t0);
#endif
}

/* mfspr: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
435
/* Optionally trace an SPR store (only when PPC_DUMP_SPR_ACCESSES is set). */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(tcg_env, t0);
#endif
}

/* mtspr: copy the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
449
/*
 * mtspr for 32-bit-wide SPRs: on 64-bit targets the stored value is
 * zero-extended from 32 bits; on 32-bit targets it is a plain store.
 */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}
461
/*
 * mtspr for a core-shared SPR: on SMT cores the write is visible to
 * all sibling threads, so it is done in a helper under serialization;
 * on single-threaded cores it degrades to a plain per-thread store.
 */
void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_generic(ctx, sprn, gprn);
        return;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn),
                                      cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
477
/*
 * 32-bit variant of spr_core_write_generic(): the value written to the
 * core-shared SPR is zero-extended from 32 bits.
 */
void spr_core_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0;

    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_generic32(ctx, sprn, gprn);
        return;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn), t0);
    spr_store_dump_spr(sprn);
}
496
/*
 * mtspr for SPRs that are core-shared only when the whole core runs a
 * single LPAR; otherwise they behave as ordinary per-thread SPRs.
 */
void spr_core_lpar_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
        spr_write_generic(ctx, sprn, gprn);
    } else {
        spr_core_write_generic(ctx, sprn, gprn);
    }
}
505
/*
 * Single-thread CTRL write: keep only the RUN bit of the source and
 * mirror it into the (one-bit-wide here) TS field.
 */
static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
{
    /* This does not implement >1 thread */
    TCGv run = tcg_temp_new();
    TCGv val = tcg_temp_new();

    tcg_gen_extract_tl(run, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
    tcg_gen_shli_tl(val, run, 8);                 /* Duplicate the bit in TS */
    tcg_gen_or_tl(val, val, run);
    gen_store_spr(sprn, val);
}
516
/*
 * mtspr CTRL: in 1LPAR mode the write is core-shared and must go
 * through a serialized helper; otherwise the single-thread fast path
 * applies.  Always ends the TB so the PMU sees run-latch changes.
 */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
        /* CTRL behaves as 1-thread in LPAR-per-thread mode */
        spr_write_CTRL_ST(ctx, sprn, gprn);
        goto out;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_write_CTRL(tcg_env, tcg_constant_i32(sprn),
                              cpu_gpr[gprn]);
out:
    spr_store_dump_spr(sprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
541
542 #if !defined(CONFIG_USER_ONLY)
/*
 * mtspr for write-one-to-clear SPRs (e.g. the BookE TSR): every bit
 * set in the source GPR clears the corresponding SPR bit, all other
 * bits are preserved.
 */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, sprn);
    /*
     * new = old & ~gpr.  The previous code computed old & -gpr via
     * tcg_gen_neg_tl: arithmetic negation keeps the lowest written bit
     * and clears everything below it (and a write of 0 would clear the
     * whole register), which is not write-one-to-clear semantics.
     */
    tcg_gen_andc_tl(t0, t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
}
552
/* SPR access callback that intentionally does nothing. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}
556
557 #endif
558
559 /* SPR common to all PowerPC */
560 /* XER */
/*
 * mfspr XER: reassemble the architected XER value from cpu_xer and the
 * separately-tracked SO/OV/CA (and, on ISA v3.0, OV32/CA32) bits.
 */
void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
{
    TCGv dst = cpu_gpr[gprn];
    TCGv tmp = tcg_temp_new();

    tcg_gen_mov_tl(dst, cpu_xer);
    tcg_gen_shli_tl(tmp, cpu_so, XER_SO);
    tcg_gen_or_tl(dst, dst, tmp);
    tcg_gen_shli_tl(tmp, cpu_ov, XER_OV);
    tcg_gen_or_tl(dst, dst, tmp);
    tcg_gen_shli_tl(tmp, cpu_ca, XER_CA);
    tcg_gen_or_tl(dst, dst, tmp);
    if (is_isa300(ctx)) {
        tcg_gen_shli_tl(tmp, cpu_ov32, XER_OV32);
        tcg_gen_or_tl(dst, dst, tmp);
        tcg_gen_shli_tl(tmp, cpu_ca32, XER_CA32);
        tcg_gen_or_tl(dst, dst, tmp);
    }
}
581
/*
 * mtspr XER: scatter the value into cpu_xer plus the standalone flag
 * bits.  All five flags are written unconditionally; the isa300 check
 * happens when reading them back (see spr_read_xer).
 */
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv src = cpu_gpr[gprn];

    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
}
596
/* LR */
/* mfspr LR. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

/* mtspr LR. */
void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}
607
608 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Debug facilities */
/* CFAR */
/* mfspr CFAR (come-from address register). */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

/* mtspr CFAR. */
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}

/* Breakpoint */
/* mtspr CIABR: handled in a helper; may affect pending breakpoints. */
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_ciabr(tcg_env, cpu_gpr[gprn]);
}
627
/* Watchpoint */
/* mtspr DAWR0 (data address watchpoint register 0). */
void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawr0(tcg_env, cpu_gpr[gprn]);
}

/* mtspr DAWRX0 (extension register for DAWR0). */
void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
}

/* mtspr DAWR1. */
void spr_write_dawr1(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawr1(tcg_env, cpu_gpr[gprn]);
}

/* mtspr DAWRX1. */
void spr_write_dawrx1(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawrx1(tcg_env, cpu_gpr[gprn]);
}
652 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
653
/* CTR */
/* mfspr CTR. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

/* mtspr CTR. */
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}
664
/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* User-mode alias: read the privileged SPR sprn + 0x10. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* User-mode alias: write the privileged SPR sprn + 0x10. */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
682
/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
/* mfspr DECR: the decrementer is time-based, so go through a helper. */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], tcg_env);
}

/* mtspr DECR. */
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(tcg_env, cpu_gpr[gprn]);
}
#endif
698
/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
/* mftb: time base lower half. */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], tcg_env);
}

/* mftbu: time base upper half. */
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], tcg_env);
}

/* Alternate time base, lower half. */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], tcg_env);
}

/* Alternate time base, upper half. */
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], tcg_env);
}
722
723 #if !defined(CONFIG_USER_ONLY)
/* mttbl: time base is LPAR-shared, serialize in 1LPAR mode. */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
}

/* mttbu: upper half of the time base. */
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
}

/* Alternate time base writes, lower and upper halves. */
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(tcg_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(tcg_env, cpu_gpr[gprn]);
}
753
#if defined(TARGET_PPC64)
/* mfspr PURR (processor utilization of resources register). */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], tcg_env);
}

/* mtspr PURR: LPAR-shared, serialize in 1LPAR mode. */
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
}

/* HDECR */
/* mfspr HDECR (hypervisor decrementer). */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], tcg_env);
}

/* mtspr HDECR: LPAR-shared, serialize in 1LPAR mode. */
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
}

/* mfspr VTB (virtual time base). */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], tcg_env);
}

/* mtspr VTB: LPAR-shared, serialize in 1LPAR mode. */
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
}

/* mtspr TBU40: sets the top 40 bits of the time base. */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
}

#endif
811 #endif
812
813 #if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/* mfspr IBAT0..3 (upper/lower selected by the low sprn bit). */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

/* mfspr IBAT4..7. */
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

/* mtspr IBATnU: writes go through a helper (may change mappings). */
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* mtspr IBATnL. */
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}
853
/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
/* mfspr DBAT0..3 (upper/lower selected by the low sprn bit). */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

/* mfspr DBAT4..7. */
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

/* mtspr DBATnU: writes go through a helper (may change mappings). */
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* mtspr DBATnL. */
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}

void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
893
/* SDR1 */
/* mtspr SDR1 (hashed page table base): handled in a helper. */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(tcg_env, cpu_gpr[gprn]);
}
899
#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
/* mtspr PIDR (process id register). */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(tcg_env, cpu_gpr[gprn]);
}

/* mtspr LPIDR (logical partition id). */
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(tcg_env, cpu_gpr[gprn]);
}

/* mfspr HIOR: reads the exception prefix. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env, offsetof(CPUPPCState, excp_prefix));
}

/* mtspr HIOR: stores a masked value as the exception prefix. */
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
}

/* mtspr PTCR (partition table control): core-shared, serialize on SMT. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }

    gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]);
}

/* mtspr PCR (processor compatibility register). */
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(tcg_env, cpu_gpr[gprn]);
}

/* DPDES */
/* mfspr DPDES (doorbell state): LPAR-shared, serialize in 1LPAR mode. */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], tcg_env);
}

/* mtspr DPDES. */
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_store_dpdes(tcg_env, cpu_gpr[gprn]);
}
#endif
957 #endif
958
959 /* PowerPC 40x specific registers */
960 #if !defined(CONFIG_USER_ONLY)
spr_read_40x_pit(DisasContext * ctx,int gprn,int sprn)961 void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
962 {
963 translator_io_start(&ctx->base);
964 gen_helper_load_40x_pit(cpu_gpr[gprn], tcg_env);
965 }
966
spr_write_40x_pit(DisasContext * ctx,int sprn,int gprn)967 void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
968 {
969 translator_io_start(&ctx->base);
970 gen_helper_store_40x_pit(tcg_env, cpu_gpr[gprn]);
971 }
972
spr_write_40x_dbcr0(DisasContext * ctx,int sprn,int gprn)973 void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
974 {
975 translator_io_start(&ctx->base);
976 gen_store_spr(sprn, cpu_gpr[gprn]);
977 gen_helper_store_40x_dbcr0(tcg_env, cpu_gpr[gprn]);
978 /* We must stop translation as we may have rebooted */
979 ctx->base.is_jmp = DISAS_EXIT_UPDATE;
980 }
981
/* Write 40x SLER via helper, inside an icount I/O section. */
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(tcg_env, cpu_gpr[gprn]);
}
987
/* Write 40x TCR: timer control, so open an icount I/O section first. */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(tcg_env, cpu_gpr[gprn]);
}
993
/* Write 40x TSR: timer status, so open an icount I/O section first. */
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(tcg_env, cpu_gpr[gprn]);
}
999
/* Write 40x PID: only the low 8 bits of the GPR are passed to the helper. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv pid = tcg_temp_new();
    tcg_gen_andi_tl(pid, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(tcg_env, pid);
}
1006
/* Write BookE TCR: timer control, so open an icount I/O section first. */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(tcg_env, cpu_gpr[gprn]);
}
1012
/* Write BookE TSR: timer status, so open an icount I/O section first. */
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(tcg_env, cpu_gpr[gprn]);
}
1018 #endif
1019
1020 /* PIR */
1021 #if !defined(CONFIG_USER_ONLY)
/* Write PIR: only the low 4 bits are retained. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv pir = tcg_temp_new();
    tcg_gen_andi_tl(pir, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, pir);
}
1028 #endif
1029
1030 /* SPE specific registers */
/* Read SPEFSCR: it lives as a 32-bit field in env; zero-extend into rT. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], tmp);
}
1037
/* Write SPEFSCR: truncate the GPR to 32 bits and store into env. */
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(tmp, cpu_gpr[gprn]);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUPPCState, spe_fscr));
}
1044
1045 #if !defined(CONFIG_USER_ONLY)
1046 /* Callback used to write the exception vector base */
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv prefix = tcg_temp_new();

    /* Keep only the bits allowed by ivpr_mask, mirror into env and the SPR */
    tcg_gen_ld_tl(prefix, tcg_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(prefix, prefix, cpu_gpr[gprn]);
    tcg_gen_st_tl(prefix, tcg_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, prefix);
}
1055
/* Write one of the BookE IVORn exception vectors. */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int index;

    /* Map the SPR number onto the excp_vectors[] slot it controls */
    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        index = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        index = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        index = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    /* Mask the value, then mirror it into env and the SPR itself */
    TCGv vec = tcg_temp_new();
    tcg_gen_ld_tl(vec, tcg_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(vec, vec, cpu_gpr[gprn]);
    tcg_gen_st_tl(vec, tcg_env, offsetof(CPUPPCState, excp_vectors[index]));
    gen_store_spr(sprn, vec);
}
1079 #endif
1080
1081 #ifdef TARGET_PPC64
1082 #ifndef CONFIG_USER_ONLY
/* Write AMR, inserting only the bits permitted by (U)AMOR. */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv amr = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv bits = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Pick the insertion mask: UAMOR in problem state, AMOR otherwise */
    if (ctx->pr) {
        gen_load_spr(mask, SPR_UAMOR);
    } else {
        gen_load_spr(mask, SPR_AMOR);
    }

    /* Restrict the new value to the writable bits */
    tcg_gen_and_tl(bits, mask, cpu_gpr[gprn]);

    /* Load the current AMR and clear the writable bits */
    gen_load_spr(amr, SPR_AMR);
    tcg_gen_andc_tl(amr, amr, mask);

    /* Merge in the new bits and store the result */
    tcg_gen_or_tl(amr, amr, bits);
    gen_store_spr(SPR_AMR, amr);
    spr_store_dump_spr(SPR_AMR);
}
1113
/* Write UAMOR, inserting only the bits permitted by AMOR. */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv uamor = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv bits = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* AMOR selects which bits may be changed */
    gen_load_spr(mask, SPR_AMOR);

    /* Restrict the new value to the writable bits */
    tcg_gen_and_tl(bits, mask, cpu_gpr[gprn]);

    /* Load the current UAMOR and clear the writable bits */
    gen_load_spr(uamor, SPR_UAMOR);
    tcg_gen_andc_tl(uamor, uamor, mask);

    /* Merge in the new bits and store the result */
    tcg_gen_or_tl(uamor, uamor, bits);
    gen_store_spr(SPR_UAMOR, uamor);
    spr_store_dump_spr(SPR_UAMOR);
}
1140
/* Write IAMR, inserting only the bits permitted by AMOR. */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv iamr = tcg_temp_new();
    TCGv mask = tcg_temp_new();
    TCGv bits = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* AMOR selects which bits may be changed */
    gen_load_spr(mask, SPR_AMOR);

    /* Restrict the new value to the writable bits */
    tcg_gen_and_tl(bits, mask, cpu_gpr[gprn]);

    /* Load the current IAMR and clear the writable bits */
    gen_load_spr(iamr, SPR_IAMR);
    tcg_gen_andc_tl(iamr, iamr, mask);

    /* Merge in the new bits and store the result */
    tcg_gen_or_tl(iamr, iamr, bits);
    gen_store_spr(SPR_IAMR, iamr);
    spr_store_dump_spr(SPR_IAMR);
}
1167 #endif
1168 #endif
1169
1170 #ifndef CONFIG_USER_ONLY
/* Read a THRM SPR: let the helper refresh it first, then load the SPR. */
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(tcg_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1177 #endif /* !CONFIG_USER_ONLY */
1178
1179 #if !defined(CONFIG_USER_ONLY)
/* Write e500 L1CSR0: keep only the DCE and CPE bits. */
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked = tcg_temp_new();

    tcg_gen_andi_tl(masked, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
    gen_store_spr(sprn, masked);
}
1187
/* Write e500 L1CSR1: keep only the ICE and CPE bits. */
void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked = tcg_temp_new();

    tcg_gen_andi_tl(masked, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
    gen_store_spr(sprn, masked);
}
1195
/* Write e500 L2CSR0: the flash-invalidate/flush/lock-clear bits read as 0. */
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked = tcg_temp_new();

    tcg_gen_andi_tl(masked, cpu_gpr[gprn],
                    ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
    gen_store_spr(sprn, masked);
}
1204
/* Write BookE 2.06 MMUCSR0: the helper performs the requested TLB flush. */
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(tcg_env, cpu_gpr[gprn]);
}
1209
/* Write a BookE PIDn register; the helper needs the SPR number to know
 * which one is targeted. */
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_setpid(tcg_env, tcg_constant_i32(sprn), cpu_gpr[gprn]);
}
1215
/* Write EPLC via helper. */
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(tcg_env, cpu_gpr[gprn]);
}
1220
/* Write EPSC via helper. */
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(tcg_env, cpu_gpr[gprn]);
}
1225
1226 #endif
1227
1228 #if !defined(CONFIG_USER_ONLY)
/* Write MAS7_MAS3: split the 64-bit value across MAS3 (low) and MAS7 (high). */
void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
{
    TCGv half = tcg_temp_new();
    tcg_gen_ext32u_tl(half, cpu_gpr[gprn]);
    gen_store_spr(SPR_BOOKE_MAS3, half);
    tcg_gen_shri_tl(half, cpu_gpr[gprn], 32);
    gen_store_spr(SPR_BOOKE_MAS7, half);
}
1237
/* Read MAS7_MAS3: combine MAS7 (high 32 bits) with MAS3 (low 32 bits). */
void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
{
    TCGv hi = tcg_temp_new();
    TCGv lo = tcg_temp_new();
    gen_load_spr(hi, SPR_BOOKE_MAS7);
    tcg_gen_shli_tl(hi, hi, 32);
    gen_load_spr(lo, SPR_BOOKE_MAS3);
    tcg_gen_or_tl(cpu_gpr[gprn], lo, hi);
}
1247
1248 #endif
1249
1250 #ifdef TARGET_PPC64
/* Emit an FSCR facility-availability check via helper. */
static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
                                    int bit, int sprn, int cause)
{
    gen_helper_fscr_facility_check(tcg_env, tcg_constant_i32(bit),
                                   tcg_constant_i32(sprn),
                                   tcg_constant_i32(cause));
}
1260
/* Emit an MSR-bit facility-availability check via helper. */
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    gen_helper_msr_facility_check(tcg_env, tcg_constant_i32(bit),
                                  tcg_constant_i32(sprn),
                                  tcg_constant_i32(cause));
}
1270
/* Read the upper 32 bits of the SPR one below sprn (e.g. the xU aliases). */
void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
{
    TCGv hi = tcg_temp_new();
    TCGv full = tcg_temp_new();

    gen_load_spr(full, sprn - 1);
    tcg_gen_shri_tl(hi, full, 32);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], hi);
}
1280
/* Write the upper 32 bits of the SPR one below sprn, keeping its low half. */
void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv full = tcg_temp_new();

    gen_load_spr(full, sprn - 1);
    tcg_gen_deposit_tl(full, full, cpu_gpr[gprn], 32, 32);
    gen_store_spr(sprn - 1, full);
}
1289
1290 #if !defined(CONFIG_USER_ONLY)
/* Write HMER: bits can only be cleared (new value is ANDed with current). */
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv val = tcg_temp_new();

    gen_load_spr(val, sprn);
    tcg_gen_and_tl(val, cpu_gpr[gprn], val);
    gen_store_spr(sprn, val);
    spr_store_dump_spr(sprn);
}
1300
/* Read TFMR via helper. */
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
    /* Reading TFMR can cause it to be updated, so serialize threads here too */
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
}
1309
/* Write TFMR via helper; requires core-wide serialization. */
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
}
1317
/* Write SPRC via helper. */
void spr_write_sprc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sprc(tcg_env, cpu_gpr[gprn]);
}
1322
/* Read SPRD via helper. */
void spr_read_sprd(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_sprd(cpu_gpr[gprn], tcg_env);
}
1327
/* Write SPRD via helper; requires core-wide serialization. */
void spr_write_sprd(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_store_sprd(tcg_env, cpu_gpr[gprn]);
}
1335
/* Write LPCR via helper, inside an icount I/O section. */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
1341
/* Read PMSR via helper, inside an icount I/O section. */
void spr_read_pmsr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_pmsr(cpu_gpr[gprn], tcg_env);
}
1347
/* Write PMCR via helper; needs LPAR-wide serialization and an I/O section. */
void spr_write_pmcr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_pmcr(tcg_env, cpu_gpr[gprn]);
}
1356
1357 #endif /* !defined(CONFIG_USER_ONLY) */
1358
/* Read TAR, after checking FSCR[TAR] facility availability. */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}
1364
/* Write TAR, after checking FSCR[TAR] facility availability. */
void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}
1370
/* Read a TM SPR, after checking MSR[TM] facility availability. */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}
1376
/* Write a TM SPR, after checking MSR[TM] facility availability. */
void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}
1382
/* Read the upper-32 alias of a TM SPR, with the MSR[TM] check. */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1388
/* Write the upper-32 alias of a TM SPR, with the MSR[TM] check. */
void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1394
/* Read an EBB SPR, after checking FSCR[EBB] facility availability. */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}
1400
/* Write an EBB SPR, after checking FSCR[EBB] facility availability. */
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}
1406
/* Read the upper-32 alias of an EBB SPR, with the FSCR[EBB] check. */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1412
/* Write the upper-32 alias of an EBB SPR, with the FSCR[EBB] check. */
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1418
/* Read the problem-state (H)DEXCR alias: low 32 bits only. */
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv full = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    gen_load_spr(full, sprn + 16);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], full);
}
1435
1436 /* The PPR32 SPR accesses the upper 32-bits of PPR */
/* The PPR32 SPR accesses the upper 32-bits of PPR */
void spr_read_ppr32(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], SPR_PPR);
    tcg_gen_shri_tl(cpu_gpr[gprn], cpu_gpr[gprn], 32);
    spr_load_dump_spr(SPR_PPR);
}
1443
/* Write PPR32: deposit the GPR into the upper 32 bits of PPR. */
void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn)
{
    TCGv ppr = tcg_temp_new();

    /*
     * Don't clobber the low 32-bits of the PPR. These are all reserved bits
     * but TCG does implement them, so it would be surprising to zero them
     * here. "Priority nops" are similarly careful not to clobber reserved
     * bits.
     */
    gen_load_spr(ppr, SPR_PPR);
    tcg_gen_deposit_tl(ppr, ppr, cpu_gpr[gprn], 32, 32);
    gen_store_spr(SPR_PPR, ppr);
    spr_store_dump_spr(SPR_PPR);
}
1459 #endif
1460
/*
 * Convenience wrappers for building opcode table entries:
 *   GEN_HANDLER     - 3-level decode, no extended type flags
 *   GEN_HANDLER_E   - 3-level decode with extended (type2) flags
 *   GEN_HANDLER2*   - same, but with an explicit opcode name string
 *   *_2 variants    - 4-level decode (adds opc4)
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1478
/* One entry of the opcode table. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;   /* decode levels */
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;                  /* handler + validity/type info */
    const char *oname;                      /* opcode name, for diagnostics */
} opcode_t;
1487
/* Raise a privileged-instruction program exception. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1492
/* Helpers for priv. check */
/* Raise the privileged-opcode exception and abort the current translator */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: all privileged accesses fault */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Require hypervisor state (HV=1, PR=0) */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Require supervisor state (PR=0) */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Require hypervisor real mode (HV=1, PR=0, DR=0) */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check */
#define CHK_NONE(CTX)
1525
1526 /*****************************************************************************/
1527 /* PowerPC instructions table */
1528
/* Build an opcode_t initializer; opc4 = 0xff marks "no 4th decode level". */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE but with two invalid-bit masks (dual-form instructions). */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE but with an explicit opcode name string. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
/* As GEN_OPCODE but with a real 4th decode level (opc4). */
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* 4th decode level plus an explicit opcode name string. */
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1600
1601 /* Invalid instruction */
/* Invalid instruction */
/* Default handler: raise an invalid-instruction program exception. */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
1606
/* Catch-all table entry: every operand bit is "invalid", so any encoding
 * that falls through to it raises a program exception. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1614
1615 /*** Integer comparison ***/
1616
/* Compare arg0 with arg1 ((un)signed per s) and set CR field crf, ORing
 * in the SO bit. */
static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv res = tcg_temp_new();
    TCGv_i32 res32 = tcg_temp_new_i32();

    /* res = CRF_LT, CRF_EQ or CRF_GT depending on the comparison */
    tcg_gen_movcond_tl(s ? TCG_COND_LT : TCG_COND_LTU,
                       res, arg0, arg1,
                       tcg_constant_tl(CRF_LT), tcg_constant_tl(CRF_EQ));
    tcg_gen_movcond_tl(s ? TCG_COND_GT : TCG_COND_GTU,
                       res, arg0, arg1, tcg_constant_tl(CRF_GT), res);

    /* Fold in SO and commit to the CR field */
    tcg_gen_trunc_tl_i32(res32, res);
    tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
    tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], res32);
}
1632
/* Compare arg0 against an immediate; delegates to gen_op_cmp. */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    gen_op_cmp(arg0, tcg_constant_tl(arg1), s, crf);
}
1638
/* 32-bit compare: extend the low words per signedness, then gen_op_cmp. */
static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
{
    TCGv e0 = tcg_temp_new();
    TCGv e1 = tcg_temp_new();

    if (s) {
        tcg_gen_ext32s_tl(e0, arg0);
        tcg_gen_ext32s_tl(e1, arg1);
    } else {
        tcg_gen_ext32u_tl(e0, arg0);
        tcg_gen_ext32u_tl(e1, arg1);
    }
    gen_op_cmp(e0, e1, s, crf);
}
1653
/* 32-bit compare against an immediate; delegates to gen_op_cmp32. */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    gen_op_cmp32(arg0, tcg_constant_tl(arg1), s, crf);
}
1659
/* Set CR0 from reg: signed compare against zero, 32-bit in narrow mode. */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
1668
1669 /*** Integer arithmetic ***/
1670
/*
 * Compute XER[OV] (and OV32 on ISA v3.00) for arg0 = arg1 op arg2,
 * where op is add (sub=0) or subtract-from (sub=1), and accumulate
 * the result into XER[SO].
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    /* Overflow occurs when the result sign differs from both inputs'
     * (for add) / the expected (for sub) sign pattern */
    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the overflow bit is bit 31 */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
1696
/*
 * Compute the carry out of bit 32 (XER[CA32], ISA v3.00 only) for
 * res = arg0 op arg1, with op add (sub=0) or subtract (sub=1).
 * No-op on pre-3.00 CPUs.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    /* carry into bit 32 = (arg0 ^ arg1 [^1]) ^ res, at bit 32 */
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1716
1717 /* Common add function */
/* Common add function */
/*
 * ret = arg1 + arg2 [+ ca], optionally computing CA/CA32 (compute_ca),
 * OV/OV32 (compute_ov) and CR0 (compute_rc0). ca/ca32 are the carry
 * registers to update when compute_ca is set.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a temp so flag computation can still read arg1/arg2 even if
     * ret aliases one of them */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);         /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);         /* bits changed w/ carry */
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            /* Full-width carry via double-word adds */
            TCGv zero = tcg_constant_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
1775
/*
 * 32-bit divide: ret = arg1 / arg2, signed or unsigned per 'sign'.
 * Invalid cases (divide by zero, INT_MIN / -1 when signed) are detected
 * in t2 and the divisor is forced to that nonzero flag so the host
 * division cannot trap; OV is set from the flag when compute_ov.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret,
                                     TCGv arg1, TCGv arg2, bool sign,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = overflow: (INT_MIN / -1) or divide by zero */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* On overflow, divide by t2 (== 1) instead of the real divisor */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = overflow: divide by zero */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1816
1817 #if defined(TARGET_PPC64)
/*
 * 64-bit divide: ret = arg1 / arg2, signed or unsigned per 'sign'.
 * Same invalid-case handling as gen_op_arith_divw, on 64-bit values.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret,
                                     TCGv arg1, TCGv arg2, bool sign,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = overflow: (INT64_MIN / -1) or divide by zero */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* On overflow, divide by t2 (== 1) instead of the real divisor */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = overflow: divide by zero */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1857
/*
 * 32-bit modulo: ret = arg1 % arg2, signed or unsigned per 'sign'.
 * The divisor is patched to a safe nonzero value for the invalid cases
 * (zero divisor; INT_MIN % -1 when signed) so the host op cannot trap.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = invalid: (INT_MIN % -1) or zero divisor */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        /* Replace a zero divisor with 1 */
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}
1886
1887 #if defined(TARGET_PPC64)
/*
 * 64-bit modulo: ret = arg1 % arg2, signed or unsigned per 'sign'.
 * Same invalid-case handling as gen_op_arith_modw, on 64-bit values.
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = invalid: (INT64_MIN % -1) or zero divisor */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        /* Replace a zero divisor with 1 */
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}
1915
1916 /* Common subf function */
/* Common subf function */
/*
 * ret = ~arg1 + arg2 [+ ca] (i.e. arg2 - arg1 when no carry-in),
 * optionally computing CA/CA32 (compute_ca), OV/OV32 (compute_ov) and
 * CR0 (compute_rc0).
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a temp so flag computation can still read arg1/arg2 even if
     * ret aliases one of them */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Full-width carry via double-word adds of ~arg1 */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* No carry-in: CA is simply arg2 >= arg1 (unsigned) */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
1985
1986 /*** Integer logical ***/
1987
1988 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Emit code for a "pause" hint: store 0 to CPUState.halted and raise
 * EXCP_HLT so this TB ends and the CPU loop is re-entered, yielding to
 * other vCPUs without actually halting this one.
 */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    /* tcg_env points at the CPUPPCState embedded in PowerPCCPU; step back
       by offsetof(PowerPCCPU, env) to reach the containing CPUState. */
    tcg_gen_st_i32(t0, tcg_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
1998 #endif /* defined(TARGET_PPC64) */
1999
2000 /*** Integer rotate ***/
2001
2002 /* rlwimi & rlwimi. */
/*
 * rlwimi & rlwimi. - rotate left word immediate then mask insert:
 * rA = ((rS rotl32 sh) & MASK(mb, me)) | (rA & ~MASK(mb, me)).
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Fast path: a non-wrapping mask whose low edge coincides with the
       rotation amount is exactly a bit-field deposit. */
    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() numbers bits IBM-style from the MSB of the 64-bit reg,
           so word-sized mb/me must be biased by 32. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) extends into the high 32 bits. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* A 32-bit rotate suffices when the mask stays in the low word. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the low word into the high word so a 64-bit rotate
               produces the 32-bit rotation in both halves. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2052
2053 /* rlwinm & rlwinm. */
/*
 * rlwinm & rlwinm. - rotate left word immediate then AND with mask:
 * rA = (rS rotl32 sh) & MASK(mb, me).
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    /* Recognize the two common shapes: shift-left-into-a-field
       (deposit with zero background) and shift-right-extract. */
    if (sh != 0 && len > 0 && me == (31 - sh)) {
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() numbers bits from the 64-bit MSB; bias word mb/me. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) extends into the high 32 bits. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                /* 32-bit rotate, then mask in the low word. */
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the low word so a 64-bit rotate yields the 32-bit
               rotation in both halves, then apply the wrapping mask. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2105
2106 /* rlwnm & rlwnm. */
/*
 * rlwnm & rlwnm. - rotate left word by register then AND with mask:
 * rA = (rS rotl32 (rB & 0x1f)) & MASK(mb, me).
 */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MASK() numbers bits from the 64-bit MSB; bias word mb/me. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* A wrapping mask (mb > me) extends into the high 32 bits. */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* Only the low 5 bits of rB select the rotation amount. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Duplicate the low word so a 64-bit rotate by rB & 0x1f gives
           the 32-bit rotation in both halves. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2153
2154 #if defined(TARGET_PPC64)
/*
 * Expand gen_<name>0()/gen_<name>1() (R2) or gen_<name>0..3() (R4)
 * trampolines for the 64-bit rotate instructions, passing the high
 * bit(s) of the split sh/mb/me encoding fields as extra arguments.
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2185
/*
 * Common body for the MD-form rotates (rldicl/rldicr/rldic):
 * rA = (rS rotl64 sh) & MASK(mb, me), with deposit/extract fast paths
 * for the shift-left-into-field and shift-right-extract shapes.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;
    int rsh = (64 - sh) & 63;

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Shift left into a zero background. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-rotate + right-aligned mask == bit-field extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2205
2206 /* rldicl - rldicl. */
gen_rldicl(DisasContext * ctx,int mbn,int shn)2207 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2208 {
2209 uint32_t sh, mb;
2210
2211 sh = SH(ctx->opcode) | (shn << 5);
2212 mb = MB(ctx->opcode) | (mbn << 5);
2213 gen_rldinm(ctx, mb, 63, sh);
2214 }
2215 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2216
2217 /* rldicr - rldicr. */
gen_rldicr(DisasContext * ctx,int men,int shn)2218 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2219 {
2220 uint32_t sh, me;
2221
2222 sh = SH(ctx->opcode) | (shn << 5);
2223 me = MB(ctx->opcode) | (men << 5);
2224 gen_rldinm(ctx, 0, me, sh);
2225 }
2226 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2227
2228 /* rldic - rldic. */
gen_rldic(DisasContext * ctx,int mbn,int shn)2229 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2230 {
2231 uint32_t sh, mb;
2232
2233 sh = SH(ctx->opcode) | (shn << 5);
2234 mb = MB(ctx->opcode) | (mbn << 5);
2235 gen_rldinm(ctx, mb, 63 - sh, sh);
2236 }
2237 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2238
/*
 * Common body for the MDS-form rotates (rldcl/rldcr):
 * rA = (rS rotl64 (rB & 0x3f)) & MASK(mb, me).
 */
static void gen_rldnm(DisasContext *ctx, int mb, int me)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    TCGv t0;

    /* Only the low 6 bits of rB select the rotation amount. */
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, t_rb, 0x3f);
    tcg_gen_rotl_tl(t_ra, t_rs, t0);

    tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2255
2256 /* rldcl - rldcl. */
gen_rldcl(DisasContext * ctx,int mbn)2257 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2258 {
2259 uint32_t mb;
2260
2261 mb = MB(ctx->opcode) | (mbn << 5);
2262 gen_rldnm(ctx, mb, 63);
2263 }
2264 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2265
2266 /* rldcr - rldcr. */
gen_rldcr(DisasContext * ctx,int men)2267 static inline void gen_rldcr(DisasContext *ctx, int men)
2268 {
2269 uint32_t me;
2270
2271 me = MB(ctx->opcode) | (men << 5);
2272 gen_rldnm(ctx, 0, me);
2273 }
2274 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2275
2276 /* rldimi - rldimi. */
/*
 * rldimi & rldimi. - rotate left dword immediate then mask insert:
 * rA = ((rS rotl64 sh) & MASK(mb, 63 - sh)) | (rA & ~MASK(mb, 63 - sh)).
 */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Non-wrapping mask: a plain bit-field deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrapping mask: rotate, mask, and merge by hand. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2301 #endif
2302
2303 /*** Integer shift ***/
2304
2305 /* slw & slw. */
/*
 * slw & slw. - shift left word: rA = (rS << (rB & 0x1f)) & 0xffffffff,
 * or 0 when rB bit 0x20 is set (shift amount >= 32).
 */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB's 0x20 bit to the sign position, then sign-propagate it
       into an all-ones (shift too big) or all-zeroes mask. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    /* Word result: clear the high 32 bits. */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2328
2329 /* sraw & sraw. */
/* sraw & sraw. - shift right algebraic word (helper computes CA too). */
static void gen_sraw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];

    gen_helper_sraw(ra, tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ra);
    }
}
2338
2339 /* srawi & srawi. */
/*
 * srawi & srawi. - shift right algebraic word immediate.
 * CA is set iff the source is negative and any 1-bits were shifted out.
 */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Zero shift: just sign-extend the word; no bits are lost. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* Collect the bits about to be shifted out... */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the (sign-extended) value is negative. */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2368
2369 /* srw & srw. */
/*
 * srw & srw. - shift right word: rA = (rS & 0xffffffff) >> (rB & 0x1f),
 * or 0 when rB bit 0x20 is set (shift amount >= 32).
 */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Sign-propagate rB's 0x20 bit into an all-ones/all-zeroes mask. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: operate on the zero-extended low word. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2392
2393 #if defined(TARGET_PPC64)
2394 /* sld & sld. */
/*
 * sld & sld. - shift left dword: rA = rS << (rB & 0x3f),
 * or 0 when rB bit 0x40 is set (shift amount >= 64).
 */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2411
2412 /* srad & srad. */
/* srad & srad. - shift right algebraic dword (helper computes CA too). */
static void gen_srad(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];

    gen_helper_srad(ra, tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ra);
    }
}
2421 /* sradi & sradi. */
/*
 * sradi & sradi. - shift right algebraic dword immediate; n is the high
 * bit of the split 6-bit shift field.  CA is set iff the source is
 * negative and any 1-bits were shifted out.
 */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Zero shift: copy through; no bits are lost. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* Collect the bits about to be shifted out... */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the source is negative. */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2449
/* sradi with the high shift bit (sh[5]) clear. */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}
2454
/* sradi with the high shift bit (sh[5]) set. */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2459
2460 /* extswsli & extswsli. */
/*
 * extswsli & extswsli. - extend sign word and shift left immediate;
 * n is the high bit of the split 6-bit shift field.
 */
static inline void gen_extswsli(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    /* Sign-extend the low word first, then shift the 64-bit result. */
    tcg_gen_ext32s_tl(dst, src);
    tcg_gen_shli_tl(dst, dst, sh);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2473
/* extswsli with the high shift bit (sh[5]) clear. */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}
2478
/* extswsli with the high shift bit (sh[5]) set. */
static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
2483
2484 /* srd & srd. */
/*
 * srd & srd. - shift right dword: rA = rS >> (rB & 0x3f),
 * or 0 when rB bit 0x40 is set (shift amount >= 64).
 */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2501 #endif
2502
2503 /*** Addressing modes ***/
2504 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
/*
 * EA = (rA|0) + SIMM, with the low bits given by maskl cleared from the
 * displacement (for DS/DQ-form instructions).  In narrow (32-bit) mode
 * the result is truncated to 32 bits.
 */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* Zero displacement: EA is just (possibly truncated) rA. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
2529
/*
 * EA = (rA|0) + rB (X-form indexed addressing), truncated to 32 bits
 * in narrow mode.
 */
static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
        }
    } else {
        tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    }
}
2545
/* EA = (rA|0), truncated to 32 bits in narrow mode. */
static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
{
    if (rA(ctx->opcode) == 0) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
    }
}
2556
/* ret = arg1 + val, truncated to 32 bits in narrow mode. */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
2565
/*
 * Raise an alignment interrupt for instructions (lmw/stmw/string ops)
 * that are not supported in little-endian mode; the opcode bits are
 * folded into the error code for the handler.
 */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
2571
2572 /* EA <- {(ra == 0) ? 0 : GPR[ra]} + displ */
/* EA <- {(ra == 0) ? 0 : GPR[ra]} + displ, in a fresh temporary;
   truncated to 32 bits in narrow mode. */
static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
{
    TCGv ea = tcg_temp_new();
    if (ra) {
        tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
    } else {
        tcg_gen_mov_tl(ea, displ);
    }
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ea, ea);
    }
    return ea;
}
2586
2587 #if defined(TARGET_PPC64)
2588 /* EA <- (ra == 0) ? 0 : GPR[ra] */
/* EA <- (ra == 0) ? 0 : GPR[ra], in a fresh temporary; truncated to
   32 bits in narrow mode. */
static TCGv do_ea_calc_ra(DisasContext *ctx, int ra)
{
    TCGv EA = tcg_temp_new();
    if (!ra) {
        tcg_gen_movi_tl(EA, 0);
    } else if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(EA, cpu_gpr[ra]);
    } else {
        tcg_gen_mov_tl(EA, cpu_gpr[ra]);
    }
    return EA;
}
2601 #endif
2602
2603 /*** Integer load ***/
/* Memop with the guest's current default byte order applied. */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
/* Memop with the byte order reversed relative to the default. */
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))

/* Emit gen_qemu_<ldop>(): load into a target_long-sized value. */
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

/* Byte-reversed variants for lhbrx/lwbrx. */
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))

/* Emit gen_qemu_<ldop>_i64(): load into a 64-bit value. */
#define GEN_QEMU_LOAD_64(ldop, op)                                  \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
                                              TCGv_i64 val,         \
                                              TCGv addr)            \
{                                                                   \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword load for ldbrx. */
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
2641
/* Emit gen_qemu_<stop>(): store a target_long-sized value. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed variants for sthbrx/stwbrx. */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

/* Emit gen_qemu_<stop>_i64(): store a 64-bit value. */
#define GEN_QEMU_STORE_64(stop, op)                                 \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,    \
                                              TCGv_i64 val,         \
                                              TCGv addr)            \
{                                                                   \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);               \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword store for stdbrx. */
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
2675
/*
 * Emit gen_<name>x(): X-form indexed load into rD.  'chk' is a privilege
 * check macro (CHK_NONE / CHK_SV / CHK_HVRM) run before any codegen.
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only loads (cache-inhibited variants). */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID loads: supervisor-only, use the EPID load MMU index. */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
2718
2719 /*** Integer store ***/
/*** Integer store ***/
/*
 * Emit gen_<name>x(): X-form indexed store from rS.  'chk' is a privilege
 * check macro (CHK_NONE / CHK_SV / CHK_HVRM) run before any codegen.
 */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only stores (cache-inhibited variants). */
#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/* External-PID stores: supervisor-only, use the EPID store MMU index.
   rD and rS name the same opcode field, hence rD below. */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif

#if defined(TARGET_PPC64)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
2761 /*** Integer load and store with byte reverse ***/
2762
/* lhbrx - load halfword byte-reverse indexed */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx - load word byte-reverse indexed */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx - load doubleword byte-reverse indexed (PPC2_DBRX) */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx - store doubleword byte-reverse indexed (PPC2_DBRX) */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx - store halfword byte-reverse indexed */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx - store word byte-reverse indexed */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
2780
2781 /*** Integer load and store multiple ***/
2782
2783 /* lmw */
/* lmw - load multiple words from EA into rD..r31; big-endian mode only. */
static void gen_lmw(DisasContext *ctx)
{
    TCGv addr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    addr = tcg_temp_new();
    gen_addr_imm_index(ctx, addr, 0);
    gen_helper_lmw(tcg_env, addr, tcg_constant_i32(rD(ctx->opcode)));
}
2799
2800 /* stmw */
/* stmw - store multiple words rS..r31 at EA; big-endian mode only. */
static void gen_stmw(DisasContext *ctx)
{
    TCGv addr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    addr = tcg_temp_new();
    gen_addr_imm_index(ctx, addr, 0);
    gen_helper_stmw(tcg_env, addr, tcg_constant_i32(rS(ctx->opcode)));
}
2816
2817 /*** Integer load and store strings ***/
2818
2819 /* lswi */
2820 /*
2821 * PowerPC32 specification says we must generate an exception if rA is
2822 * in the range of registers to be loaded. In an other hand, IBM says
2823 * this is valid, but rA won't be loaded. For now, I'll follow the
2824 * spec...
2825 */
/*
 * lswi - load string word immediate; NB == 0 encodes 32 bytes.
 * Big-endian mode only.  Raises an invalid-operation program interrupt
 * if rA falls inside the range of registers to be loaded (per the spec;
 * see the comment above).
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    /* Number of registers touched, 4 bytes per register. */
    nr = DIV_ROUND_UP(nb, 4);
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(start);
    gen_helper_lsw(tcg_env, t0, t1, t2);
}
2854
2855 /* lswx */
/* lswx - load string word indexed; byte count comes from XER, checked in
   the helper.  Big-endian mode only. */
static void gen_lswx(DisasContext *ctx)
{
    TCGv ea;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    ea = tcg_temp_new();
    gen_addr_reg_index(ctx, ea);
    gen_helper_lswx(tcg_env, ea,
                    tcg_constant_i32(rD(ctx->opcode)),
                    tcg_constant_i32(rA(ctx->opcode)),
                    tcg_constant_i32(rB(ctx->opcode)));
}
2873
2874 /* stswi */
/* stswi - store string word immediate; NB == 0 encodes 32 bytes.
   Big-endian mode only. */
static void gen_stswi(DisasContext *ctx)
{
    TCGv ea;
    int count = NB(ctx->opcode);

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (count == 0) {
        count = 32;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    ea = tcg_temp_new();
    gen_addr_register(ctx, ea);
    gen_helper_stsw(tcg_env, ea, tcg_constant_i32(count),
                    tcg_constant_i32(rS(ctx->opcode)));
}
2895
2896 /* stswx */
/*
 * stswx - store string word indexed; the byte count is the low 7 bits
 * of XER.  Big-endian mode only.
 */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;

    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    /* Extract the transfer length from XER[0:6]. */
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_constant_i32(rS(ctx->opcode));
    gen_helper_stsw(tcg_env, t0, t1, t2);
}
2915
2916 #if !defined(CONFIG_USER_ONLY)
/*
 * Emit a runtime check for a pending (lazy) TLB flush and call the
 * appropriate flush helper if one is needed.  A global flush must be
 * the last thing in the TB because it schedules async work that has to
 * run before the next instruction.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Fast path: nothing pending, skip the helper call. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(tcg_env);
    } else {
        gen_helper_check_tlb_flush_local(tcg_env);
    }
    gen_set_label(l);
    if (global) {
        /*
         * Global TLB flush uses async-work which must run before the
         * next instruction, so this must be the last in the TB.
         */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    }
}
2943 #else
/* User-only build: there is no softmmu TLB to flush, so this is a no-op. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
2945 #endif
2946
2947 /* isync */
/*
 * isync: full barrier plus a TB exit so any state changed by preceding
 * instructions (e.g. MSR updates) takes effect before the next one.
 */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
2960
/*
 * Load-and-reserve (lbarx/lharx/lwarx/...): perform an aligned load and
 * record the reservation address, length and loaded value so a later
 * store-conditional can validate it.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    /* The load must be aligned; misalignment faults rather than losing
       the reservation. */
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
}
2973
/* Emit gen_<name>(): a load-and-reserve of the given operand size. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, MO_UB)
LARX(lharx, MO_UW)
LARX(lwarx, MO_UL)
2984
2985 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
2986 TCGv EA, TCGCond cond, int addend)
2987 {
2988 TCGv t = tcg_temp_new();
2989 TCGv t2 = tcg_temp_new();
2990 TCGv u = tcg_temp_new();
2991
2992 tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
2993 tcg_gen_addi_tl(t2, EA, memop_size(memop));
2994 tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
2995 tcg_gen_addi_tl(u, t, addend);
2996
2997 /* E.g. for fetch and increment bounded... */
2998 /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
2999 tcg_gen_movcond_tl(cond, u, t, t2, u, t);
3000 tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
3001
3002 /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
3003 tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t,
3004 tcg_constant_tl(1 << (memop_size(memop) * 8 - 1)));
3005 }
3006
/*
 * Common body for lwat/ldat (ISA v3.0 atomic load-and-X).  The function
 * code FC selects the operation; RT receives the fetched value, RT+1
 * supplies the source operand and (for CAS-not-equal) RT+2 the store
 * value.  Operations TCG cannot express atomically under CF_PARALLEL
 * fall back to gen_helper_exit_atomic() so the TB is re-executed with
 * an exclusive lock.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];   /* operand register wraps mod 32 */

    need_serial = false;
    memop |= MO_ALIGN;              /* all AT-form accesses must be aligned */
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* Compare only the low 32 bits for the word-sized form. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            /* Store RT+2 when mem != RT+1, else rewrite the old value. */
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock. */
        gen_helper_exit_atomic(tcg_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3103
/* lwat: 32-bit atomic load-and-X */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat: 64-bit atomic load-and-X */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3115
/*
 * Common body for stwat/stdat (ISA v3.0 atomic store-and-X).  The
 * function code FC selects the operation; the fetched old value is
 * discarded since these are store forms.  "Store twin" cannot be
 * expressed atomically in TCG, so under CF_PARALLEL it restarts the
 * TB with an exclusive lock.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();   /* atomic ops return the old value; unused */

    memop |= MO_ALIGN;          /* all AT-form accesses must be aligned */
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4: /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5: /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6: /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7: /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock. */
            gen_helper_exit_atomic(tcg_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store src to both words only if the pair currently match. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, memop_size(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3178
/* stwat: 32-bit atomic store-and-X */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat: 64-bit atomic store-and-X */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3190
/*
 * Emit a store-conditional (stbcx./sthcx./stwcx./stdcx.).  Succeeds only
 * when the EA and access size match the current reservation and memory
 * still holds the reserved value (checked via cmpxchg).  CR0 is set to
 * SO | (EQ on success); the reservation is always cleared afterwards.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *lfail;
    TCGv EA;
    TCGv cr0;
    TCGv t0;
    int rs = rS(ctx->opcode);

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();
    t0 = tcg_temp_new();

    /* CR0 starts as just the SO bit; EQ is or'ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    /* Fail fast if the reservation address or size does not match. */
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);

    /* Atomically store RS iff memory still holds the reserved value. */
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[rs], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Clear the reservation whether or not the store succeeded. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3221
/* Generate one store-conditional handler per operand size. */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

/* stbcx. sthcx. stwcx. */
STCX(stbcx_, MO_UB)
STCX(sthcx_, MO_UW)
STCX(stwcx_, MO_UL)
3231
#if defined(TARGET_PPC64)
/* ldarx: 64-bit load and reserve */
LARX(ldarx, MO_UQ)
/* stdcx.: 64-bit store conditional */
STCX(stdcx_, MO_UQ)
3237
3238 /* lqarx */
/*
 * lqarx: quadword load and reserve.  RD must be even and must not alias
 * RA/RB, otherwise the form is invalid.  Loads 16 aligned bytes into the
 * RD/RD+1 pair and records a 16-byte reservation for stqcx.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode. */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    /* Record the 16-byte reservation and both halves of the value. */
    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, tcg_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, tcg_env, offsetof(CPUPPCState, reserve_val2));
}
3268
3269 /* stqcx. */
/*
 * stqcx.: quadword store conditional.  RS must be even.  Succeeds only
 * when EA matches a 16-byte reservation and memory still holds the
 * reserved 128-bit value; CR0 is set to SO | (EQ on success) and the
 * reservation is cleared.
 */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lfail;
    TCGv EA, t0, t1;
    TCGv cr0;
    TCGv_i128 cmp, val;
    int rs = rS(ctx->opcode);

    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();

    /* CR0 starts as just the SO bit; EQ is or'ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode. */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* Compare the old 128-bit value against the reservation via xor/or. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Clear the reservation whether or not the store succeeded. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3320 #endif /* defined(TARGET_PPC64) */
3321
3322 /* wait */
/*
 * wait: decode the WC (and, for v3.1, PL) fields across the three ISA
 * generations of the instruction, reject reserved encodings, and halt
 * the CPU for WC=0.  All other WC values are implemented as no-ops,
 * which the architecture permits (see the discussion at the bottom).
 */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        TCGv_i32 t0 = tcg_constant_i32(1);
        /*
         * Reach back from env to the enclosing CPUState's 'halted' field:
         * tcg_env points at env inside PowerPCCPU, so subtract env's
         * offset and add halted's.
         */
        tcg_gen_st_i32(t0, tcg_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, such could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
3410
3411 #if defined(TARGET_PPC64)
/* doze: enter the DOZE power-saving state (hypervisor privileged). */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_DOZE);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
3427
/* nap: enter the NAP power-saving state (hypervisor privileged). */
static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_NAP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
3443
/* stop: enter the STOP power-saving state (hypervisor privileged). */
static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_STOP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
3459
/* sleep: enter the SLEEP power-saving state (hypervisor privileged). */
static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
3475
/* rvwinkle: enter the RVWINKLE power-saving state (hypervisor privileged). */
static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_RVWINKLE);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
3491
/*
 * Append one 8-byte entry to the Branch History Rolling Buffer at
 * base + offset, then advance offset by 8 and wrap it with mask.
 * Returns the updated offset so successive entries can be chained.
 */
static inline TCGv gen_write_bhrb(TCGv_ptr base, TCGv offset, TCGv mask, TCGv value)
{
    TCGv_ptr tmp = tcg_temp_new_ptr();

    /* add base and offset to get address of bhrb entry */
    /*
     * NOTE(review): the (TCGv_ptr) cast assumes TCGv and TCGv_ptr share a
     * representation here (64-bit target on a 64-bit host) — confirm.
     */
    tcg_gen_add_ptr(tmp, base, (TCGv_ptr)offset);

    /* store value into bhrb at bhrb_offset */
    tcg_gen_st_i64(value, tmp, 0);

    /* add 8 to current bhrb_offset */
    tcg_gen_addi_tl(offset, offset, 8);

    /* apply offset mask */
    tcg_gen_and_tl(offset, offset, mask);

    return offset;
}
3510 #endif /* #if defined(TARGET_PPC64) */
3511
/*
 * Record a branch in CFAR and, when enabled and not filtered out, in the
 * BHRB.  @nip is the address of the branch instruction, @target the
 * branch target (only stored, with the 'T' bit set, for XL-form
 * branches), @inst_type the BHRB_TYPE_* classification used for
 * filtering.  No-op on 32-bit targets.
 */
static inline void gen_update_branch_history(DisasContext *ctx,
                                             target_ulong nip,
                                             TCGv target,
                                             target_long inst_type)
{
#if defined(TARGET_PPC64)
    TCGv_ptr base;
    TCGv tmp;
    TCGv offset;
    TCGv mask;
    TCGLabel *no_update;

    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }

    if (!ctx->has_bhrb ||
        !ctx->bhrb_enable ||
        inst_type == BHRB_TYPE_NORECORD) {
        return;
    }

    tmp = tcg_temp_new();
    no_update = gen_new_label();

    /* check for bhrb filtering */
    tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUPPCState, bhrb_filter));
    tcg_gen_andi_tl(tmp, tmp, inst_type);
    tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, 0, no_update);

    base = tcg_temp_new_ptr();
    offset = tcg_temp_new();
    mask = tcg_temp_new();

    /* load bhrb base address */
    tcg_gen_ld_ptr(base, tcg_env, offsetof(CPUPPCState, bhrb_base));

    /* load current bhrb_offset */
    tcg_gen_ld_tl(offset, tcg_env, offsetof(CPUPPCState, bhrb_offset));

    /* load a BHRB offset mask */
    tcg_gen_ld_tl(mask, tcg_env, offsetof(CPUPPCState, bhrb_offset_mask));

    /* write the branch address entry */
    offset = gen_write_bhrb(base, offset, mask, tcg_constant_i64(nip));

    /* Also record the target address for XL-Form branches */
    if (inst_type & BHRB_TYPE_XL_FORM) {

        /* Set the 'T' bit for target entries */
        tcg_gen_ori_tl(tmp, target, 0x2);

        offset = gen_write_bhrb(base, offset, mask, tmp);
    }

    /* save updated bhrb_offset for next time */
    tcg_gen_st_tl(offset, tcg_env, offsetof(CPUPPCState, bhrb_offset));

    gen_set_label(no_update);
#endif
}
3572
3573 #if defined(TARGET_PPC64)
/*
 * Add this TB's instruction count to the PMU counters.  Called at TB
 * exit.  When only PMC5-6 are active, PMC5 is updated inline (with an
 * overflow check if PMCjCE is set); otherwise the generic insns_inc
 * helper distributes the count across the programmable counters.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    translator_io_start(&ctx->base);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(tcg_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(tcg_env, tcg_constant_i32(ctx->base.num_insns));
    }
#else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
#endif /* #if !defined(CONFIG_USER_ONLY) */
}
3627 #else
/* Stub: PMU instruction counting only exists on 64-bit PowerPC targets. */
static void pmu_count_insns(DisasContext *ctx)
{
}
3632 #endif /* #if defined(TARGET_PPC64) */
3633
use_goto_tb(DisasContext * ctx,target_ulong dest)3634 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
3635 {
3636 if (unlikely(ctx->singlestep_enabled)) {
3637 return false;
3638 }
3639 return translator_use_goto_tb(&ctx->base, dest);
3640 }
3641
/*
 * Emit an indirect jump to the next TB via the TB lookup cache, or raise
 * a debug exception when single-stepping.  PMU instruction counting must
 * be flushed here if the lookup is going to exit to the main loop.
 */
static void gen_lookup_and_goto_ptr(DisasContext *ctx)
{
    if (unlikely(ctx->singlestep_enabled)) {
        gen_debug_exception(ctx, false);
    } else {
        /*
         * tcg_gen_lookup_and_goto_ptr will exit the TB if
         * CF_NO_GOTO_PTR is set. Count insns now.
         *
         * CF_NO_GOTO_PTR is a compile-flags bit, so it must be tested
         * against tb_cflags() (as done for CF_PARALLEL elsewhere in this
         * file), not against tb->flags, which holds target CPU state.
         */
        if (tb_cflags(ctx->base.tb) & CF_NO_GOTO_PTR) {
            pmu_count_insns(ctx);
        }

        tcg_gen_lookup_and_goto_ptr();
    }
}
3658
3659 /*** Branch ***/
/*
 * Jump to @dest: use direct TB chaining (goto_tb slot @n) when allowed,
 * otherwise fall back to the TB-lookup path.  The low two bits of the
 * destination are always cleared, and in 32-bit mode the address is
 * truncated to 32 bits first.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        /* Count insns here: a chained exit bypasses the lookup path. */
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
3675
/*
 * Set the link register to @nip (the return address); truncated to
 * 32 bits when running in 32-bit mode.
 */
static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
{
    tcg_gen_movi_tl(cpu_lr, NARROW_MODE(ctx) ? (uint32_t)nip : nip);
}
3683
3684 /* b ba bl bla */
/* b ba bl bla */
static void gen_b(DisasContext *ctx)
{
    target_ulong li, target;

    /* sign extend LI */
    /* The xor/subtract pair sign-extends the 26-bit displacement. */
    li = LI(ctx->opcode);
    li = (li ^ 0x02000000) - 0x02000000;
    if (likely(AA(ctx->opcode) == 0)) {
        /* relative branch: target is CIA + displacement */
        target = ctx->cia + li;
    } else {
        /* absolute branch */
        target = li;
    }
    if (LK(ctx->opcode)) {
        /* bl/bla: save the return address and record a call in the BHRB */
        gen_setlr(ctx, ctx->base.pc_next);
        gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_CALL);
    } else {
        gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_OTHER);
    }
    gen_goto_tb(ctx, 0, target);
    ctx->base.is_jmp = DISAS_NORETURN;
}
3706
3707 #define BCOND_IM 0
3708 #define BCOND_LR 1
3709 #define BCOND_CTR 2
3710 #define BCOND_TAR 3
3711
/*
 * Common body for all conditional branches (bc, bcctr, bclr, bctar).
 * Evaluates the BO-encoded CTR-decrement and CR-bit conditions by
 * branching to label l1 on the "not taken" outcome, emits the taken
 * path (direct for BCOND_IM, indirect via @target otherwise), then the
 * fall-through path under l1 when the branch is actually conditional.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;
    target_long bhrb_type = BHRB_TYPE_OTHER;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Indirect branch: copy the target register now, since LK may
           overwrite LR below before the branch is resolved. */
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
        if (!LK(ctx->opcode)) {
            bhrb_type |= BHRB_TYPE_INDIRECT;
        }
        bhrb_type |= BHRB_TYPE_XL_FORM;
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
        bhrb_type |= BHRB_TYPE_CALL;
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* Test first, then decrement ("test and decrement" order). */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal order: decrement CTR, then test the new value. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
        bhrb_type |= BHRB_TYPE_COND;
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            /* branch if CR bit set: fall through (to l1) when clear */
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            /* branch if CR bit clear: fall through when set */
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
        bhrb_type |= BHRB_TYPE_COND;
    }

    gen_update_branch_history(ctx, ctx->cia, target, bhrb_type);

    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Indirect branch: low two target bits are always cleared. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
3830
/* bc: conditional branch with immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr: conditional branch to CTR */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
3850
3851 /*** Condition register logical ***/
/*** Condition register logical ***/
/*
 * Generate a CR-bit logical op: align the crbA and crbB source bits to
 * crbD's position within its 4-bit CR field (shifting left or right as
 * needed), apply tcg_op, then merge the single result bit back into the
 * destination CR field without disturbing its other bits.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}

/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
3897
3898 /* mcrf */
/* mcrf: copy one 4-bit CR field to another */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
3903
3904 /*** System linkage ***/
3905
3906 /* rfi (supervisor only) */
/* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    /* rfi is a context-synchronizing return; nothing to record in BHRB */
    gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD);
    gen_helper_rfi(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
3928
3929 #if defined(TARGET_PPC64)
/* rfid (supervisor only): 64-bit return from interrupt */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD);
    gen_helper_rfid(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
3943
3944 #if !defined(CONFIG_USER_ONLY)
/* rfscv (supervisor only): return from system call vectored */
static void gen_rfscv(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    /*
     * NOTE(review): this branch is unreachable — the enclosing block is
     * already guarded by #if !defined(CONFIG_USER_ONLY) above.
     */
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD);
    gen_helper_rfscv(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
3959
/* hrfid (hypervisor only): return from hypervisor interrupt */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    gen_helper_hrfid(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
3972 #endif
3973
3974 /* sc */
/* sc: system call.  User-only builds route it to the linux-user handler. */
#if defined(CONFIG_USER_ONLY)
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
#else
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
#endif
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /*
     * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
     * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
     * for Ultravisor which TCG does not support, so just ignore the top 6.
     */
    lev = (ctx->opcode >> 5) & 0x1;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
3992
3993 #if defined(TARGET_PPC64)
3994 #if !defined(CONFIG_USER_ONLY)
/* scv: system call vectored (ISA v3.0, system emulation only) */
static void gen_scv(DisasContext *ctx)
{
    /* Full 7-bit LEV field selects the interrupt vector. */
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(tcg_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
4005 #endif
4006 #endif
4007
4008 /*** Trap ***/
4009
4010 /* Check for unconditional traps (always or never) */
check_unconditional_trap(DisasContext * ctx,int to)4011 static bool check_unconditional_trap(DisasContext *ctx, int to)
4012 {
4013 /* Trap never */
4014 if (to == 0) {
4015 return true;
4016 }
4017 /* Trap always */
4018 if (to == 31) {
4019 gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4020 return true;
4021 }
4022 return false;
4023 }
4024
4025 /*** Processor control ***/
4026
4027 /* mcrxr */
/*
 * mcrxr: move SO/OV/CA from XER into CR field crfD (as bits 3/2/1 of the
 * field), then clear them in XER.
 */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);    /* SO -> bit 3 */
    tcg_gen_shli_i32(t1, t1, 2);    /* OV -> bit 2 */
    tcg_gen_shli_i32(dst, dst, 1);  /* CA -> bit 1 */
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The moved XER bits are cleared. */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4047
4048 #ifdef TARGET_PPC64
4049 /* mcrxrx */
/*
 * mcrxrx: move OV/OV32/CA/CA32 from XER into CR field crfD as
 * OV:OV32:CA:CA32 (bits 3..0).  Unlike mcrxr, XER is left unchanged.
 */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4066 #endif
4067
4068 /* mfcr mfocrf */
/*
 * mfcr / mfocrf: read the condition register into RD.  With bit 20 set
 * (mfocrf) and a one-hot CRM mask, only the selected 4-bit field is
 * placed at its architected position; otherwise (mfcr) all eight CR
 * fields are assembled with CR0 in the most significant nibble.
 */
static void gen_mfcr(DisasContext *ctx)
{
    uint32_t crm, crn;

    if (likely(ctx->opcode & 0x00100000)) {
        /* mfocrf: single field selected by a one-hot CRM mask. */
        crm = CRM(ctx->opcode);
        if (likely(crm && ((crm & (crm - 1)) == 0))) {
            crn = ctz32(crm);
            tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
            tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
                            cpu_gpr[rD(ctx->opcode)], crn * 4);
        }
    } else {
        /* mfcr: fold all eight fields together, one nibble each. */
        TCGv_i32 acc = tcg_temp_new_i32();
        int i;

        tcg_gen_mov_i32(acc, cpu_crf[0]);
        for (i = 1; i < 8; i++) {
            tcg_gen_shli_i32(acc, acc, 4);
            tcg_gen_or_i32(acc, acc, cpu_crf[i]);
        }
        tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], acc);
    }
}
4101
/* mfmsr: copy the MSR into rD; supervisor privileged. */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4108
/*
 * mfspr: common implementation shared by mfspr and mftb.
 * Selects the SPR read callback according to the current privilege
 * level (user / hypervisor / supervisor), then either invokes it,
 * raises a privilege exception (SPR_NOACCESS), or handles an
 * undefined SPR number (no-op or exception depending on SPR bit 0x10
 * and MSR:PR).
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4170
gen_mfspr(DisasContext * ctx)4171 static void gen_mfspr(DisasContext *ctx)
4172 {
4173 gen_op_mfspr(ctx);
4174 }
4175
/* mftb: time base read; shares the SPR-read path since TB is an SPR. */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4181
/*
 * mtcrf / mtocrf: move (fields of) rS into CR.
 * mtocrf (bit 20 set) with a single-bit CRM updates just that CR
 * field; mtcrf updates every CR field whose CRM bit is set.
 */
static void gen_mtcrf(DisasContext *ctx)
{
    uint32_t crm, crn;

    crm = CRM(ctx->opcode);
    if (likely((ctx->opcode & 0x00100000))) {
        /* mtocrf is only defined when CRM has exactly one bit set */
        if (crm && ((crm & (crm - 1)) == 0)) {
            TCGv_i32 temp = tcg_temp_new_i32();
            crn = ctz32(crm);
            tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
            tcg_gen_shri_i32(temp, temp, crn * 4);
            tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
        }
    } else {
        TCGv_i32 temp = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
        for (crn = 0 ; crn < 8 ; crn++) {
            if (crm & (1 << crn)) {
                tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
                tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
            }
        }
    }
}
4207
4208 /* mtmsr */
4209 #if defined(TARGET_PPC64)
gen_mtmsrd(DisasContext * ctx)4210 static void gen_mtmsrd(DisasContext *ctx)
4211 {
4212 if (unlikely(!is_book3s_arch2x(ctx))) {
4213 gen_invalid(ctx);
4214 return;
4215 }
4216
4217 CHK_SV(ctx);
4218
4219 #if !defined(CONFIG_USER_ONLY)
4220 TCGv t0, t1;
4221 target_ulong mask;
4222
4223 t0 = tcg_temp_new();
4224 t1 = tcg_temp_new();
4225
4226 translator_io_start(&ctx->base);
4227
4228 if (ctx->opcode & 0x00010000) {
4229 /* L=1 form only updates EE and RI */
4230 mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
4231 } else {
4232 /* mtmsrd does not alter HV, S, ME, or LE */
4233 mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
4234 (1ULL << MSR_HV));
4235 /*
4236 * XXX: we need to update nip before the store if we enter
4237 * power saving mode, we will exit the loop directly from
4238 * ppc_store_msr
4239 */
4240 gen_update_nip(ctx, ctx->base.pc_next);
4241 }
4242
4243 tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
4244 tcg_gen_andi_tl(t1, cpu_msr, ~mask);
4245 tcg_gen_or_tl(t0, t0, t1);
4246
4247 gen_helper_store_msr(tcg_env, t0);
4248
4249 /* Must stop the translation as machine state (may have) changed */
4250 ctx->base.is_jmp = DISAS_EXIT_UPDATE;
4251 #endif /* !defined(CONFIG_USER_ONLY) */
4252 }
4253 #endif /* defined(TARGET_PPC64) */
4254
gen_mtmsr(DisasContext * ctx)4255 static void gen_mtmsr(DisasContext *ctx)
4256 {
4257 CHK_SV(ctx);
4258
4259 #if !defined(CONFIG_USER_ONLY)
4260 TCGv t0, t1;
4261 target_ulong mask = 0xFFFFFFFF;
4262
4263 t0 = tcg_temp_new();
4264 t1 = tcg_temp_new();
4265
4266 translator_io_start(&ctx->base);
4267 if (ctx->opcode & 0x00010000) {
4268 /* L=1 form only updates EE and RI */
4269 mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
4270 } else {
4271 /* mtmsr does not alter S, ME, or LE */
4272 mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));
4273
4274 /*
4275 * XXX: we need to update nip before the store if we enter
4276 * power saving mode, we will exit the loop directly from
4277 * ppc_store_msr
4278 */
4279 gen_update_nip(ctx, ctx->base.pc_next);
4280 }
4281
4282 tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
4283 tcg_gen_andi_tl(t1, cpu_msr, ~mask);
4284 tcg_gen_or_tl(t0, t0, t1);
4285
4286 gen_helper_store_msr(tcg_env, t0);
4287
4288 /* Must stop the translation as machine state (may have) changed */
4289 ctx->base.is_jmp = DISAS_EXIT_UPDATE;
4290 #endif
4291 }
4292
/*
 * mtspr: mirror of gen_op_mfspr for writes.  Selects the write
 * callback by privilege level, then invokes it, raises a privilege
 * exception (SPR_NOACCESS), or handles an undefined SPR number.
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4349
4350 #if defined(TARGET_PPC64)
/*
 * setb: set rD from CR field crfS: -1 if bit 0 (LT) is set, 1 if
 * bit 1 (GT) is set, else 0.  Implemented as two compares against
 * the raw 4-bit CR field value.
 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /* crf >= 4 means GT set (result 1), crf >= 8 means LT set (result -1) */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
4363 #endif
4364
4365 /*** Cache management ***/
4366
/* dcbf: data cache block flush; emulated as a byte load for MMU effects. */
static void gen_dcbf(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}
4377
/* dcbfep: external-PID dcbf; supervisor privileged, byte load via EPID. */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4389
/*
 * dcbi: data cache block invalidate; supervisor only.  Emulated as a
 * load followed by a store of the same byte to trigger the store-side
 * MMU checks the architecture requires.
 */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
4408
/* dcbst: data cache block store; emulated as a byte load for MMU effects. */
static void gen_dcbst(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_qemu_ld8u(ctx, t0, t0);
}
4419
/* dcbstep: external-PID dcbst; byte load through the EPID load index. */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4430
/* dcbt: data cache block touch (prefetch hint). */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4440
/* dcbtep: external-PID dcbt (prefetch hint). */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4450
/* dcbtst: data cache block touch for store (prefetch hint). */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4460
/* dcbtstep: external-PID dcbtst (prefetch hint). */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
4470
/*
 * dcbtls: data cache block touch and lock set.  We never actually
 * lock cache lines, so report failure via the L1CSR0 unable-to-lock
 * (CUL) bit.
 */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}
4480
/* dcblc: data cache block lock clear. */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}
4488
/*
 * dcbz: zero a data cache block.  The 970 has a special "large" form
 * selected when opcode bit 0x00200000 is clear; everything else goes
 * through the generic helper with the current MMU index.
 */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, tcgv_addr);

#ifdef TARGET_PPC64
    if (ctx->excp_model == POWERPC_EXCP_970 && !(ctx->opcode & 0x00200000)) {
        gen_helper_dcbzl(tcg_env, tcgv_addr);
        return;
    }
#endif

    gen_helper_dcbz(tcg_env, tcgv_addr, tcg_constant_i32(ctx->mem_idx));
}
4506
/* dcbzep: external-PID dcbz; uses the EPID store MMU index. */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(tcg_env, tcgv_addr, tcg_constant_i32(PPC_TLB_EPID_STORE));
}
4516
4517 /* dst / dstt */
gen_dst(DisasContext * ctx)4518 static void gen_dst(DisasContext *ctx)
4519 {
4520 if (rA(ctx->opcode) == 0) {
4521 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4522 } else {
4523 /* interpreted as no-op */
4524 }
4525 }
4526
4527 /* dstst /dststt */
gen_dstst(DisasContext * ctx)4528 static void gen_dstst(DisasContext *ctx)
4529 {
4530 if (rA(ctx->opcode) == 0) {
4531 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4532 } else {
4533 /* interpreted as no-op */
4534 }
4535
4536 }
4537
/* dss / dssall: data stream stop. */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
4543
/* icbi: instruction cache block invalidate; handled in a helper. */
static void gen_icbi(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbi(tcg_env, t0);
}
4553
/* icbiep: external-PID icbi; handled in a helper. */
static void gen_icbiep(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(tcg_env, t0);
}
4563
/* Optional: */
/* dcba: data cache block allocate. */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     * but does not generate any exception
     */
}
4574
4575 /*** Segment register manipulation ***/
4576 /* Supervisor only: */
4577
/* mfsr: read segment register SR(opcode) into rD; supervisor only. */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
4591
/*
 * mfsrin: read the segment register selected by rB[28:31] into rD;
 * supervisor only.
 */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* SR number is in the top nibble of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
4606
/* mtsr: write rS into segment register SR(opcode); supervisor only. */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4620
/*
 * mtsrin: write a GPR into the segment register selected by rB[28:31];
 * supervisor only.  Note: rD and rS occupy the same opcode bits, so
 * using rD() here reads the rS field.
 */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4635
4636 #if defined(TARGET_PPC64)
4637 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
4638
/* mfsr: 64-bit "bridge" variant backed by the SLB; supervisor only. */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
4652
/* mfsrin: 64-bit "bridge" variant; SR number taken from rB[28:31]. */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
4667
/* mtsr: 64-bit "bridge" variant backed by the SLB; supervisor only. */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4681
/* mtsrin: 64-bit "bridge" variant; SR number taken from rB[28:31]. */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4696
4697 #endif /* defined(TARGET_PPC64) */
4698
4699 /*** Lookaside buffer management ***/
4700 /* Optional & supervisor only: */
4701
/* tlbia: invalidate all TLB entries; hypervisor privileged. */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
4713
/*
 * tlbsync: synchronize TLB invalidations.  Supervisor privileged when
 * guest translation-shootdown enable (GTSE) is set, otherwise
 * hypervisor privileged.  On BookS the ptesync path already covers
 * the flush, so only BookE needs an explicit TLB-flush check here.
 */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
4733
4734 /*** External control ***/
4735 /* Optional: */
4736
/* eciwx: external control in word indexed; aligned 32-bit load into rD. */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
4748
/* ecowx: external control out word indexed; aligned 32-bit store from rS. */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* NOTE: rD and rS occupy the same opcode bits, so rD() reads rS here */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
4760
4761 /* 602 - 603 - G2 TLB management */
4762
/* tlbld: 6xx data TLB load from rB; supervisor only. */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4773
/* tlbli: 6xx instruction TLB load from rB; supervisor only. */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4784
4785 /* BookE specific instructions */
4786
/* mfapidi: unimplemented; raises an invalid-instruction exception. */
/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
4793
/* tlbiva: invalidate TLB entry by virtual address; supervisor only. */
/* XXX: not implemented on 440 ? */
static void gen_tlbiva(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /*
     * NOTE(review): t0 receives the computed EA but the helper is
     * passed rB directly, leaving t0 unused -- looks suspicious;
     * confirm against the helper's expected argument.
     */
    gen_addr_reg_index(ctx, t0);
    gen_helper_tlbiva(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
4808
4809 /* All 405 MAC instructions are translated here */
gen_405_mulladd_insn(DisasContext * ctx,int opc2,int opc3,int ra,int rb,int rt,int Rc)4810 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
4811 int ra, int rb, int rt, int Rc)
4812 {
4813 TCGv t0, t1;
4814
4815 t0 = tcg_temp_new();
4816 t1 = tcg_temp_new();
4817
4818 switch (opc3 & 0x0D) {
4819 case 0x05:
4820 /* macchw - macchw. - macchwo - macchwo. */
4821 /* macchws - macchws. - macchwso - macchwso. */
4822 /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
4823 /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
4824 /* mulchw - mulchw. */
4825 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
4826 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
4827 tcg_gen_ext16s_tl(t1, t1);
4828 break;
4829 case 0x04:
4830 /* macchwu - macchwu. - macchwuo - macchwuo. */
4831 /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
4832 /* mulchwu - mulchwu. */
4833 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
4834 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
4835 tcg_gen_ext16u_tl(t1, t1);
4836 break;
4837 case 0x01:
4838 /* machhw - machhw. - machhwo - machhwo. */
4839 /* machhws - machhws. - machhwso - machhwso. */
4840 /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
4841 /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
4842 /* mulhhw - mulhhw. */
4843 tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
4844 tcg_gen_ext16s_tl(t0, t0);
4845 tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
4846 tcg_gen_ext16s_tl(t1, t1);
4847 break;
4848 case 0x00:
4849 /* machhwu - machhwu. - machhwuo - machhwuo. */
4850 /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
4851 /* mulhhwu - mulhhwu. */
4852 tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
4853 tcg_gen_ext16u_tl(t0, t0);
4854 tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
4855 tcg_gen_ext16u_tl(t1, t1);
4856 break;
4857 case 0x0D:
4858 /* maclhw - maclhw. - maclhwo - maclhwo. */
4859 /* maclhws - maclhws. - maclhwso - maclhwso. */
4860 /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
4861 /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
4862 /* mullhw - mullhw. */
4863 tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
4864 tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
4865 break;
4866 case 0x0C:
4867 /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
4868 /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
4869 /* mullhwu - mullhwu. */
4870 tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
4871 tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
4872 break;
4873 }
4874 if (opc2 & 0x04) {
4875 /* (n)multiply-and-accumulate (0x0C / 0x0E) */
4876 tcg_gen_mul_tl(t1, t0, t1);
4877 if (opc2 & 0x02) {
4878 /* nmultiply-and-accumulate (0x0E) */
4879 tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
4880 } else {
4881 /* multiply-and-accumulate (0x0C) */
4882 tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
4883 }
4884
4885 if (opc3 & 0x12) {
4886 /* Check overflow and/or saturate */
4887 TCGLabel *l1 = gen_new_label();
4888
4889 if (opc3 & 0x10) {
4890 /* Start with XER OV disabled, the most likely case */
4891 tcg_gen_movi_tl(cpu_ov, 0);
4892 }
4893 if (opc3 & 0x01) {
4894 /* Signed */
4895 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
4896 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
4897 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
4898 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
4899 if (opc3 & 0x02) {
4900 /* Saturate */
4901 tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
4902 tcg_gen_xori_tl(t0, t0, 0x7fffffff);
4903 }
4904 } else {
4905 /* Unsigned */
4906 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
4907 if (opc3 & 0x02) {
4908 /* Saturate */
4909 tcg_gen_movi_tl(t0, UINT32_MAX);
4910 }
4911 }
4912 if (opc3 & 0x10) {
4913 /* Check overflow */
4914 tcg_gen_movi_tl(cpu_ov, 1);
4915 tcg_gen_movi_tl(cpu_so, 1);
4916 }
4917 gen_set_label(l1);
4918 tcg_gen_mov_tl(cpu_gpr[rt], t0);
4919 }
4920 } else {
4921 tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
4922 }
4923 if (unlikely(Rc) != 0) {
4924 /* Update Rc0 */
4925 gen_set_Rc0(ctx, cpu_gpr[rt]);
4926 }
4927 }
4928
/* Stamp out one translator entry point per 405 MAC variant. */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}
4935
/*
 * 405 MAC instruction table: one generated handler per encoding.
 * The second argument (opc2) selects multiply / mac / nmac, the
 * third (opc3) the operand halves and overflow/saturation behaviour.
 */
/* macchw - macchw. */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo - macchwo. */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws - macchws. */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso - macchwso. */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu - macchwsu. */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu - macchwu. */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo - macchwuo. */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw - machhw. */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo - machhwo. */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws - machhws. */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso - machhwso. */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu - machhwsu. */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu - machhwu. */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo - machhwuo. */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw - maclhw. */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo - maclhwo. */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws - maclhws. */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso - maclhwso. */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu - maclhwu. */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo - maclhwuo. */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu - maclhwsu. */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw - nmacchw. */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo - nmacchwo. */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws - nmacchws. */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw - nmachhw. */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo - nmachhwo. */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws - nmachhws. */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw - nmaclhw. */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo - nmaclhwo. */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws - nmaclhws. */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw - mulchw. */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw - mulhhw. */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw - mullhw. */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5021
/* mfdcr: read device control register SPR(opcode) into rD; supervisor only. */
static void gen_mfdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env, dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
5035
/* mtdcr: write rS into device control register SPR(opcode); supervisor only. */
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    CHK_SV(ctx);
    dcrn = tcg_constant_tl(SPR(ctx->opcode));
    gen_helper_store_dcr(tcg_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5049
/* mfdcrx: indexed DCR read -- DCR number comes from rA; supervisor only. */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5063
/* mtdcrx: indexed DCR write -- DCR number comes from rA; supervisor only. */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_store_dcr(tcg_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5077
/* dccci: data cache congruence-class invalidate; supervisor only. */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5084
/*
 * dcread: data cache read; supervisor only.  Performs the word load
 * for its MMU side effects.
 * NOTE(review): rD receives the effective address, not the loaded
 * value (which is discarded) -- confirm this matches the intended
 * dcread semantics for this core.
 */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5102
/* icbt: 40x instruction cache block touch (prefetch hint). */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5112
/* iccci: instruction cache congruence-class invalidate; supervisor only. */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5119
/* icread: instruction cache read; supervisor only. */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5126
/* rfci (40x variant): return from critical interrupt; supervisor only. */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5139
gen_rfci(DisasContext * ctx)5140 static void gen_rfci(DisasContext *ctx)
5141 {
5142 #if defined(CONFIG_USER_ONLY)
5143 GEN_PRIV(ctx);
5144 #else
5145 CHK_SV(ctx);
5146 /* Restore CPU state */
5147 gen_helper_rfci(tcg_env);
5148 ctx->base.is_jmp = DISAS_EXIT;
5149 #endif /* defined(CONFIG_USER_ONLY) */
5150 }
5151
5152 /* BookE specific */
5153
/* rfdi: return from debug interrupt; supervisor only. */
/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(tcg_env);
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5166
/* rfmci - Return From Machine Check Interrupt.  XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(tcg_env);
    /* MSR changed: leave the TB so the new context takes effect. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5179
5180 /* TLB management - PowerPC 405 implementation */
5181
/*
 * tlbre - TLB Read Entry (PowerPC 405).
 * The rB field selects which word of the entry indexed by RA is read
 * into RD: 0 = high word, 1 = low word; other values are invalid forms.
 */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5204
/*
 * tlbsx - tlbsx. : TLB Search Indexed (PowerPC 405).
 * RD = index of the TLB entry matching EA = (RA|0) + RB, or -1 if none.
 */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);          /* EA = (RA|0) + RB */
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        /* CR0 = XER[SO]; set CR0[EQ] (0x02) when the search hit. */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5226
/*
 * tlbwe - TLB Write Entry (PowerPC 405).
 * The rB field selects which word of the entry indexed by RA is written
 * from RS: 0 = high word, 1 = low word; other values are invalid forms.
 */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5250
5251 /* TLB management - PowerPC 440 implementation */
5252
/*
 * tlbre - TLB Read Entry (PowerPC 440).
 * rB selects the entry word (0, 1 or 2) to read into RD from the entry
 * indexed by RA; any other word number is an invalid form.
 */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5277
/*
 * tlbsx - tlbsx. : TLB Search Indexed (PowerPC 440).
 * RD = index of the TLB entry matching EA = (RA|0) + RB, or -1 if none.
 */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);          /* EA = (RA|0) + RB */
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        /* CR0 = XER[SO]; set CR0[EQ] (0x02) when the search hit. */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5299
/*
 * tlbwe - TLB Write Entry (PowerPC 440).
 * rB selects the entry word (0, 1 or 2) written from RS into the entry
 * indexed by RA; any other word number is an invalid form.
 */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(tcg_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5323
5324 /* TLB management - PowerPC BookE 2.06 implementation */
5325
/* tlbre - TLB Read Entry (BookE 2.06); operands come from MAS registers. */
static void gen_tlbre_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbre(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5336
/* tlbsx - tlbsx. : TLB Search Indexed (BookE 2.06); result goes to MAS regs. */
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* EA = (RA|0) + RB; reuse RB directly when RA is 0. */
    if (rA(ctx->opcode)) {
        t0 = tcg_temp_new();
        tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    } else {
        t0 = cpu_gpr[rB(ctx->opcode)];
    }
    gen_helper_booke206_tlbsx(tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5355
/* tlbwe - TLB Write Entry (BookE 2.06); operands come from MAS registers. */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5366
gen_tlbivax_booke206(DisasContext * ctx)5367 static void gen_tlbivax_booke206(DisasContext *ctx)
5368 {
5369 #if defined(CONFIG_USER_ONLY)
5370 GEN_PRIV(ctx);
5371 #else
5372 TCGv t0;
5373
5374 CHK_SV(ctx);
5375 t0 = tcg_temp_new();
5376 gen_addr_reg_index(ctx, t0);
5377 gen_helper_booke206_tlbivax(tcg_env, t0);
5378 #endif /* defined(CONFIG_USER_ONLY) */
5379 }
5380
gen_tlbilx_booke206(DisasContext * ctx)5381 static void gen_tlbilx_booke206(DisasContext *ctx)
5382 {
5383 #if defined(CONFIG_USER_ONLY)
5384 GEN_PRIV(ctx);
5385 #else
5386 TCGv t0;
5387
5388 CHK_SV(ctx);
5389 t0 = tcg_temp_new();
5390 gen_addr_reg_index(ctx, t0);
5391
5392 switch ((ctx->opcode >> 21) & 0x3) {
5393 case 0:
5394 gen_helper_booke206_tlbilx0(tcg_env, t0);
5395 break;
5396 case 1:
5397 gen_helper_booke206_tlbilx1(tcg_env, t0);
5398 break;
5399 case 3:
5400 gen_helper_booke206_tlbilx3(tcg_env, t0);
5401 break;
5402 default:
5403 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5404 break;
5405 }
5406 #endif /* defined(CONFIG_USER_ONLY) */
5407 }
5408
/* wrtee - Write MSR External Enable: MSR[EE] = RS[EE] (privileged). */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Copy only the EE bit from RS into MSR, leaving all other bits. */
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}
5430
/* wrteei - Write MSR External Enable Immediate: MSR[EE] = opcode bit E. */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    if (ctx->opcode & 0x00008000) {       /* E bit of the instruction */
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        /* Disabling EE cannot make an interrupt pending; keep translating. */
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5448
5449 /* PowerPC 440 specific instructions */
5450
/*
 * dlmzb - Determine Leftmost Zero Byte (PowerPC 440 specific).
 * Implemented entirely in the helper; the constant passes Rc so the
 * helper can also update CR0 for the record form.
 */
static void gen_dlmzb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
    gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], tcg_env,
                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}
5458
/* icbt - Instruction Cache Block Touch (440/BookE variant). */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     * does not generate any exception
     */
}
5468
gen_tbegin(DisasContext * ctx)5469 static void gen_tbegin(DisasContext *ctx)
5470 {
5471 if (unlikely(!ctx->tm_enabled)) {
5472 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
5473 return;
5474 }
5475 gen_helper_tbegin(tcg_env);
5476 }
5477
/*
 * User-level Transactional Memory instructions that degenerate to
 * "clear CR0" because QEMU's tbegin always fails immediately.
 */
#define GEN_TM_NOOP(name)                                                     \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    if (unlikely(!ctx->tm_enabled)) {                                         \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);                  \
        return;                                                               \
    }                                                                         \
    /*                                                                        \
     * Because tbegin always fails in QEMU, these user                        \
     * space instructions all have a simple implementation:                   \
     *                                                                        \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                                      \
     *           = 0b0 || 0b00 || 0b0                                         \
     */                                                                       \
    tcg_gen_movi_i32(cpu_crf[0], 0);                                          \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
5502
gen_cp_abort(DisasContext * ctx)5503 static inline void gen_cp_abort(DisasContext *ctx)
5504 {
5505 /* Do Nothing */
5506 }
5507
/* copy/paste (ISA v3.0) are unimplemented: decode them as illegal insns. */
#define GEN_CP_PASTE_NOOP(name)                                               \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    /*                                                                        \
     * Generate invalid exception until we have an                            \
     * implementation of the copy paste facility                              \
     */                                                                       \
    gen_invalid(ctx);                                                         \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
5520
/*
 * tcheck - Transaction Check.  Reports the transaction as doomed in the
 * target CR field, since tbegin always fails under QEMU.
 */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
5536
#if defined(CONFIG_USER_ONLY)
/* Privileged TM instructions always trap in user-mode emulation. */
#define GEN_TM_PRIV_NOOP(name)                                                \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    gen_priv_opc(ctx);                                                        \
}

#else

/* Privileged TM instructions: supervisor check, then "clear CR0" no-op. */
#define GEN_TM_PRIV_NOOP(name)                                                \
static inline void gen_##name(DisasContext *ctx)                              \
{                                                                             \
    CHK_SV(ctx);                                                              \
    if (unlikely(!ctx->tm_enabled)) {                                         \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);                  \
        return;                                                               \
    }                                                                         \
    /*                                                                        \
     * Because tbegin always fails, the implementation is                     \
     * simple:                                                                \
     *                                                                        \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                                        \
     *         = 0b0 || 0b00 || 0b0                                           \
     */                                                                       \
    tcg_gen_movi_i32(cpu_crf[0], 0);                                          \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
5568
get_fpr(TCGv_i64 dst,int regno)5569 static inline void get_fpr(TCGv_i64 dst, int regno)
5570 {
5571 tcg_gen_ld_i64(dst, tcg_env, fpr_offset(regno));
5572 }
5573
set_fpr(int regno,TCGv_i64 src)5574 static inline void set_fpr(int regno, TCGv_i64 src)
5575 {
5576 tcg_gen_st_i64(src, tcg_env, fpr_offset(regno));
5577 /*
5578 * Before PowerISA v3.1 the result of doubleword 1 of the VSR
5579 * corresponding to the target FPR was undefined. However,
5580 * most (if not all) real hardware were setting the result to 0.
5581 * Starting at ISA v3.1, the result for doubleword 1 is now defined
5582 * to be 0.
5583 */
5584 tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
5585 }
5586
5587 /*
5588 * Helpers for decodetree used by !function for decoding arguments.
5589 */
times_2(DisasContext * ctx,int x)5590 static int times_2(DisasContext *ctx, int x)
5591 {
5592 return x * 2;
5593 }
5594
times_4(DisasContext * ctx,int x)5595 static int times_4(DisasContext *ctx, int x)
5596 {
5597 return x * 4;
5598 }
5599
times_16(DisasContext * ctx,int x)5600 static int times_16(DisasContext *ctx, int x)
5601 {
5602 return x * 16;
5603 }
5604
dw_compose_ea(DisasContext * ctx,int x)5605 static int64_t dw_compose_ea(DisasContext *ctx, int x)
5606 {
5607 return deposit64(0xfffffffffffffe00, 3, 6, x);
5608 }
5609
5610 /*
5611 * Helpers for trans_* functions to check for specific insns flags.
5612 * Use token pasting to ensure that we use the proper flag with the
5613 * proper variable.
5614 */
5615 #define REQUIRE_INSNS_FLAGS(CTX, NAME) \
5616 do { \
5617 if (((CTX)->insns_flags & PPC_##NAME) == 0) { \
5618 return false; \
5619 } \
5620 } while (0)
5621
5622 #define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
5623 do { \
5624 if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
5625 return false; \
5626 } \
5627 } while (0)
5628
5629 /* Then special-case the check for 64-bit so that we elide code for ppc32. */
5630 #if TARGET_LONG_BITS == 32
5631 # define REQUIRE_64BIT(CTX) return false
5632 #else
5633 # define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B)
5634 #endif
5635
5636 #define REQUIRE_VECTOR(CTX) \
5637 do { \
5638 if (unlikely(!(CTX)->altivec_enabled)) { \
5639 gen_exception((CTX), POWERPC_EXCP_VPU); \
5640 return true; \
5641 } \
5642 } while (0)
5643
5644 #define REQUIRE_VSX(CTX) \
5645 do { \
5646 if (unlikely(!(CTX)->vsx_enabled)) { \
5647 gen_exception((CTX), POWERPC_EXCP_VSXU); \
5648 return true; \
5649 } \
5650 } while (0)
5651
5652 #define REQUIRE_FPU(ctx) \
5653 do { \
5654 if (unlikely(!(ctx)->fpu_enabled)) { \
5655 gen_exception((ctx), POWERPC_EXCP_FPU); \
5656 return true; \
5657 } \
5658 } while (0)
5659
#if !defined(CONFIG_USER_ONLY)
/* Privileged insn: trap if running in problem (user) state. */
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

/* Hypervisor insn: trap unless in hypervisor-privileged supervisor state. */
#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
/* User-mode emulation: all privileged/hypervisor insns trap unconditionally. */
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
5680
5681 /*
5682 * Helpers for implementing sets of trans_* functions.
5683 * Defer the implementation of NAME to FUNC, with optional extra arguments.
5684 */
5685 #define TRANS(NAME, FUNC, ...) \
5686 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
5687 { return FUNC(ctx, a, __VA_ARGS__); }
5688 #define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
5689 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
5690 { \
5691 REQUIRE_INSNS_FLAGS(ctx, FLAGS); \
5692 return FUNC(ctx, a, __VA_ARGS__); \
5693 }
5694 #define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
5695 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
5696 { \
5697 REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
5698 return FUNC(ctx, a, __VA_ARGS__); \
5699 }
5700
5701 #define TRANS64(NAME, FUNC, ...) \
5702 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
5703 { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
5704 #define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
5705 static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
5706 { \
5707 REQUIRE_64BIT(ctx); \
5708 REQUIRE_INSNS_FLAGS2(ctx, FLAGS2); \
5709 return FUNC(ctx, a, __VA_ARGS__); \
5710 }
5711
5712 /* TODO: More TRANS* helpers for extra insn_flags checks. */
5713
5714
5715 #include "decode-insn32.c.inc"
5716 #include "decode-insn64.c.inc"
5717 #include "power8-pmu-regs.c.inc"
5718
5719 /*
5720 * Incorporate CIA into the constant when R=1.
5721 * Validate that when R=1, RA=0.
5722 */
resolve_PLS_D(DisasContext * ctx,arg_D * d,arg_PLS_D * a)5723 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
5724 {
5725 d->rt = a->rt;
5726 d->ra = a->ra;
5727 d->si = a->si;
5728 if (a->r) {
5729 if (unlikely(a->ra != 0)) {
5730 gen_invalid(ctx);
5731 return false;
5732 }
5733 d->si += ctx->cia;
5734 }
5735 return true;
5736 }
5737
5738 #include "translate/fixedpoint-impl.c.inc"
5739
5740 #include "translate/fp-impl.c.inc"
5741
5742 #include "translate/vmx-impl.c.inc"
5743
5744 #include "translate/vsx-impl.c.inc"
5745
5746 #include "translate/dfp-impl.c.inc"
5747
5748 #include "translate/spe-impl.c.inc"
5749
5750 #include "translate/branch-impl.c.inc"
5751
5752 #include "translate/processor-ctrl-impl.c.inc"
5753
5754 #include "translate/storage-ctrl-impl.c.inc"
5755
5756 #include "translate/misc-impl.c.inc"
5757
5758 #include "translate/bhrb-impl.c.inc"
5759
5760 /* Handles lfdp */
gen_dform39(DisasContext * ctx)5761 static void gen_dform39(DisasContext *ctx)
5762 {
5763 if ((ctx->opcode & 0x3) == 0) {
5764 if (ctx->insns_flags2 & PPC2_ISA205) {
5765 return gen_lfdp(ctx);
5766 }
5767 }
5768 return gen_invalid(ctx);
5769 }
5770
5771 /* Handles stfdp */
gen_dform3D(DisasContext * ctx)5772 static void gen_dform3D(DisasContext *ctx)
5773 {
5774 if ((ctx->opcode & 3) == 0) { /* DS-FORM */
5775 /* stfdp */
5776 if (ctx->insns_flags2 & PPC2_ISA205) {
5777 return gen_stfdp(ctx);
5778 }
5779 }
5780 return gen_invalid(ctx);
5781 }
5782
5783 #if defined(TARGET_PPC64)
/* brd - Byte-Reverse Doubleword (ISA v3.1): RA = bswap64(RS). */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
5789
/*
 * brw - Byte-Reverse Words (ISA v3.1): byte-reverse each 32-bit word of
 * RS independently.  A full 64-bit bswap reverses each word but also
 * swaps the two words; rotating by 32 swaps them back into place.
 */
static void gen_brw(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}
5797
5798 /* brh */
gen_brh(DisasContext * ctx)5799 static void gen_brh(DisasContext *ctx)
5800 {
5801 TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
5802 TCGv_i64 t1 = tcg_temp_new_i64();
5803 TCGv_i64 t2 = tcg_temp_new_i64();
5804
5805 tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
5806 tcg_gen_and_i64(t2, t1, mask);
5807 tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
5808 tcg_gen_shli_i64(t1, t1, 8);
5809 tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
5810 }
5811 #endif
5812
5813 static opcode_t opcodes[] = {
5814 #if defined(TARGET_PPC64)
5815 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
5816 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
5817 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
5818 #endif
5819 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
5820 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
5821 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
5822 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
5823 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
5824 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
5825 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
5826 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
5827 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
5828 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
5829 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
5830 #if defined(TARGET_PPC64)
5831 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
5832 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
5833 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
5834 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
5835 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
5836 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
5837 PPC_NONE, PPC2_ISA300),
5838 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
5839 PPC_NONE, PPC2_ISA300),
5840 #endif
5841 /* handles lfdp, lxsd, lxssp */
5842 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
5843 /* handles stfdp, stxsd, stxssp */
5844 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
5845 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
5846 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
5847 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
5848 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
5849 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
5850 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
5851 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
5852 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
5853 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
5854 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
5855 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
5856 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
5857 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
5858 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
5859 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
5860 #if defined(TARGET_PPC64)
5861 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
5862 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
5863 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
5864 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
5865 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
5866 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
5867 #endif
5868 /* ISA v3.0 changed the extended opcode from 62 to 30 */
5869 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
5870 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
5871 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
5872 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
5873 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
5874 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
5875 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
5876 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
5877 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
5878 #if defined(TARGET_PPC64)
5879 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
5880 #if !defined(CONFIG_USER_ONLY)
5881 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
5882 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
5883 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
5884 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
5885 #endif
5886 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
5887 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
5888 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
5889 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
5890 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
5891 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
5892 #endif
5893 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
5894 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
5895 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
5896 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
5897 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
5898 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
5899 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
5900 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
5901 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
5902 #if defined(TARGET_PPC64)
5903 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
5904 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
5905 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
5906 #endif
5907 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
5908 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
5909 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
5910 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
5911 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
5912 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
5913 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
5914 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
5915 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
5916 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
5917 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
5918 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
5919 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
5920 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
5921 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
5922 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
5923 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
5924 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
5925 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
5926 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
5927 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
5928 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
5929 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
5930 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
5931 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
5932 #if defined(TARGET_PPC64)
5933 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
5934 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
5935 PPC_SEGMENT_64B),
5936 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
5937 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
5938 PPC_SEGMENT_64B),
5939 #endif
5940 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
5941 /*
5942 * XXX Those instructions will need to be handled differently for
5943 * different ISA versions
5944 */
5945 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
5946 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
5947 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
5948 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
5949 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
5950 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
5951 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
5952 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
5953 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
5954 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
5955 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
5956 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
5957 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
5958 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
5959 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
5960 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
5961 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
5962 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
5963 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
5964 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
5965 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
5966 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
5967 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
5968 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
5969 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
5970 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
5971 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
5972 PPC_NONE, PPC2_BOOKE206),
5973 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
5974 PPC_NONE, PPC2_BOOKE206),
5975 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
5976 PPC_NONE, PPC2_BOOKE206),
5977 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
5978 PPC_NONE, PPC2_BOOKE206),
5979 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
5980 PPC_NONE, PPC2_BOOKE206),
5981 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
5982 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
5983 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
5984 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
5985 PPC_BOOKE, PPC2_BOOKE206),
5986 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
5987 PPC_440_SPEC),
5988 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
5989 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
5990
5991 #if defined(TARGET_PPC64)
5992 #undef GEN_PPC64_R2
5993 #undef GEN_PPC64_R4
5994 #define GEN_PPC64_R2(name, opc1, opc2) \
5995 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
5996 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
5997 PPC_64B)
5998 #define GEN_PPC64_R4(name, opc1, opc2) \
5999 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6000 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
6001 PPC_64B), \
6002 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
6003 PPC_64B), \
6004 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
6005 PPC_64B)
6006 GEN_PPC64_R4(rldicl, 0x1E, 0x00),
6007 GEN_PPC64_R4(rldicr, 0x1E, 0x02),
6008 GEN_PPC64_R4(rldic, 0x1E, 0x04),
6009 GEN_PPC64_R2(rldcl, 0x1E, 0x08),
6010 GEN_PPC64_R2(rldcr, 0x1E, 0x09),
6011 GEN_PPC64_R4(rldimi, 0x1E, 0x06),
6012 #endif
6013
6014 #undef GEN_LDX_E
6015 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
6016 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
6017
6018 #if defined(TARGET_PPC64)
6019 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
6020
6021 /* HV/P7 and later only */
6022 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
6023 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
6024 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
6025 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
6026 #endif
6027 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
6028 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
6029
6030 /* External PID based load */
6031 #undef GEN_LDEPX
6032 #define GEN_LDEPX(name, ldop, opc2, opc3) \
6033 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6034 0x00000001, PPC_NONE, PPC2_BOOKE206),
6035
6036 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
6037 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
6038 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
6039 #if defined(TARGET_PPC64)
6040 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
6041 #endif
6042
6043 #undef GEN_STX_E
6044 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
6045 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
6046
6047 #if defined(TARGET_PPC64)
6048 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
6049 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
6050 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
6051 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
6052 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
6053 #endif
6054 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
6055 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
6056
6057 #undef GEN_STEPX
6058 #define GEN_STEPX(name, ldop, opc2, opc3) \
6059 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3, \
6060 0x00000001, PPC_NONE, PPC2_BOOKE206),
6061
6062 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
6063 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
6064 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
6065 #if defined(TARGET_PPC64)
6066 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
6067 #endif
6068
6069 #undef GEN_CRLOGIC
6070 #define GEN_CRLOGIC(name, tcg_op, opc) \
6071 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
6072 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
6073 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
6074 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
6075 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
6076 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
6077 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
6078 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
6079 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
6080
6081 #undef GEN_MAC_HANDLER
6082 #define GEN_MAC_HANDLER(name, opc2, opc3) \
6083 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
6084 GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
6085 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
6086 GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
6087 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
6088 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
6089 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
6090 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
6091 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
6092 GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
6093 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
6094 GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
6095 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
6096 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
6097 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
6098 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
6099 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
6100 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
6101 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
6102 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
6103 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
6104 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
6105 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
6106 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
6107 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
6108 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
6109 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
6110 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
6111 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
6112 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
6113 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
6114 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
6115 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
6116 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
6117 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
6118 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
6119 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
6120 GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
6121 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
6122 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
6123 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
6124 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
6125 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
6126
6127 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
6128 PPC_NONE, PPC2_TM),
6129 GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
6130 PPC_NONE, PPC2_TM),
6131 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
6132 PPC_NONE, PPC2_TM),
6133 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
6134 PPC_NONE, PPC2_TM),
6135 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
6136 PPC_NONE, PPC2_TM),
6137 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
6138 PPC_NONE, PPC2_TM),
6139 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
6140 PPC_NONE, PPC2_TM),
6141 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
6142 PPC_NONE, PPC2_TM),
6143 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
6144 PPC_NONE, PPC2_TM),
6145 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
6146 PPC_NONE, PPC2_TM),
6147 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
6148 PPC_NONE, PPC2_TM),
6149
6150 #include "translate/fp-ops.c.inc"
6151
6152 #include "translate/vmx-ops.c.inc"
6153
6154 #include "translate/vsx-ops.c.inc"
6155
6156 #include "translate/spe-ops.c.inc"
6157 };
6158
6159 /*****************************************************************************/
6160 /* Opcode types */
/*
 * Entries in the opcode dispatch tables are tagged pointers: the low
 * bits of a table slot distinguish a direct handler from a pointer to
 * a nested (indirect) sub-table.
 */
enum {
    PPC_DIRECT = 0, /* Opcode routine */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

/* Mask covering the tag bits of a table entry. */
#define PPC_OPCODE_MASK 0x3
6167
is_indirect_opcode(void * handler)6168 static inline int is_indirect_opcode(void *handler)
6169 {
6170 return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
6171 }
6172
ind_table(void * handler)6173 static inline opc_handler_t **ind_table(void *handler)
6174 {
6175 return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6176 }
6177
6178 /* Instruction table creation */
6179 /* Opcodes tables creation */
/* Initialize all @len slots of @table to the shared invalid handler. */
static void fill_new_table(opc_handler_t **table, int len)
{
    opc_handler_t **end = table + len;

    while (table < end) {
        *table++ = &invalid_handler;
    }
}
6188
/*
 * Allocate a fresh indirect sub-table, mark every entry invalid, and
 * store the PPC_INDIRECT-tagged pointer into @table[@idx].
 * Always succeeds (g_new() aborts on OOM); returns 0.
 */
static int create_new_table(opc_handler_t **table, unsigned char idx)
{
    opc_handler_t **sub = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);

    fill_new_table(sub, PPC_CPU_INDIRECT_OPCODES_LEN);
    table[idx] = (opc_handler_t *)((uintptr_t)sub | PPC_INDIRECT);
    return 0;
}
6199
/*
 * Store @handler in @table[@idx].  Returns 0 on success, -1 if the
 * slot is already occupied (never overwrites a registered handler).
 */
static int insert_in_table(opc_handler_t **table, unsigned char idx,
                           opc_handler_t *handler)
{
    if (table[idx] == &invalid_handler) {
        table[idx] = handler;
        return 0;
    }
    return -1;
}
6210
/*
 * Register a single-level (opc1 only) instruction handler in the main
 * opcode table.  Returns 0 on success, -1 if the slot is taken.
 */
static int register_direct_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx, opc_handler_t *handler)
{
    int ret = insert_in_table(ppc_opcodes, idx, handler);

    if (ret < 0) {
        printf("*** ERROR: opcode %02x already assigned in main "
               "opcode table\n", idx);
    }
    return ret;
}
6222
/*
 * Ensure @table[@idx1] is an indirect sub-table (creating it on first
 * use) and, when @handler is non-NULL, insert @handler at @idx2 inside
 * it.  A NULL @handler only creates/validates the sub-table.
 * Returns 0 on success, -1 on any conflict.
 */
static int register_ind_in_table(opc_handler_t **table,
                                 unsigned char idx1, unsigned char idx2,
                                 opc_handler_t *handler)
{
    opc_handler_t *slot = table[idx1];

    if (slot == &invalid_handler) {
        /* First use of idx1: allocate the indirect sub-table. */
        if (create_new_table(table, idx1) < 0) {
            printf("*** ERROR: unable to create indirect table "
                   "idx=%02x\n", idx1);
            return -1;
        }
    } else if (!is_indirect_opcode(slot)) {
        printf("*** ERROR: idx %02x already assigned to a direct "
               "opcode\n", idx1);
        return -1;
    }
    /* Re-read table[idx1]: create_new_table() may have rewritten it. */
    if (handler != NULL &&
        insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
        printf("*** ERROR: opcode %02x already assigned in "
               "opcode table %02x\n", idx2, idx1);
        return -1;
    }
    return 0;
}
6249
/*
 * Register a two-level (opc1/opc2) instruction handler.  Thin wrapper
 * over register_ind_in_table() on the top-level table.
 */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
6256
/*
 * Register a three-level (opc1/opc2/opc3) instruction handler,
 * creating/joining the first-level indirect table as needed.
 * Returns 0 on success, -1 on conflict.
 */
static int register_dblind_insn(opc_handler_t **ppc_opcodes,
                                unsigned char idx1, unsigned char idx2,
                                unsigned char idx3, opc_handler_t *handler)
{
    opc_handler_t **sub;

    /* NULL handler: only create/validate the idx1 sub-table. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    sub = ind_table(ppc_opcodes[idx1]);
    if (register_ind_in_table(sub, idx2, idx3, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    return 0;
}
6275
/*
 * Register a four-level (opc1/opc2/opc3/opc4) instruction handler,
 * creating/joining each intermediate indirect table in turn.
 * Returns 0 on success, -1 on conflict.
 */
static int register_trplind_insn(opc_handler_t **ppc_opcodes,
                                 unsigned char idx1, unsigned char idx2,
                                 unsigned char idx3, unsigned char idx4,
                                 opc_handler_t *handler)
{
    opc_handler_t **table;

    /* A NULL handler only creates/validates the sub-table. */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    table = ind_table(ppc_opcodes[idx1]);
    if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
        printf("*** ERROR: unable to join 2nd-level indirect table idx "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    table = ind_table(table[idx2]);
    if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
        return -1;
    }
    return 0;
}
/*
 * Register one opcode_t entry in the dispatch tables.  An opc field of
 * 0xFF marks an unused level, so the number of non-0xFF fields selects
 * the depth (direct / 2-level / 3-level / 4-level) of the registration.
 * Returns 0 on success, -1 on conflict.
 */
static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
{
    if (insn->opc2 == 0xFF) {
        return register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler);
    }
    if (insn->opc3 == 0xFF) {
        return register_ind_insn(ppc_opcodes, insn->opc1, insn->opc2,
                                 &insn->handler);
    }
    if (insn->opc4 == 0xFF) {
        return register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
                                    insn->opc3, &insn->handler);
    }
    return register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
                                 insn->opc3, insn->opc4, &insn->handler);
}
6332
/*
 * Count the live (non-invalid) entries of @table, recursing into
 * indirect sub-tables.  NULL slots are fixed up to &invalid_handler,
 * and sub-tables that turn out to be empty are freed and their parent
 * slot reset.  Returns the number of live entries.
 */
static int test_opcode_table(opc_handler_t **table, int len)
{
    int i, count, tmp;

    for (i = 0, count = 0; i < len; i++) {
        /* Consistency fixup */
        if (table[i] == NULL) {
            table[i] = &invalid_handler;
        }
        if (table[i] != &invalid_handler) {
            if (is_indirect_opcode(table[i])) {
                tmp = test_opcode_table(ind_table(table[i]),
                                        PPC_CPU_INDIRECT_OPCODES_LEN);
                if (tmp == 0) {
                    /*
                     * Empty sub-table: release it.  Free the real
                     * allocation (ind_table() strips the PPC_INDIRECT
                     * tag) -- passing the tagged slot value itself
                     * would hand g_free() an address offset from the
                     * one returned by g_new().
                     */
                    g_free(ind_table(table[i]));
                    table[i] = &invalid_handler;
                } else {
                    count++;
                }
            } else {
                count++;
            }
        }
    }

    return count;
}
6360
/*
 * Prune empty sub-tables from the whole opcode tree and warn if the
 * CPU model ended up with no instructions at all.
 */
static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
{
    int live = test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN);

    if (live == 0) {
        printf("*** WARNING: no opcode defined !\n");
    }
}
6367
6368 /*****************************************************************************/
create_ppc_opcodes(PowerPCCPU * cpu,Error ** errp)6369 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
6370 {
6371 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
6372 opcode_t *opc;
6373
6374 fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
6375 for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
6376 if (((opc->handler.type & pcc->insns_flags) != 0) ||
6377 ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
6378 if (register_insn(cpu->opcodes, opc) < 0) {
6379 error_setg(errp, "ERROR initializing PowerPC instruction "
6380 "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
6381 opc->opc3);
6382 return;
6383 }
6384 }
6385 }
6386 fix_opcode_tables(cpu->opcodes);
6387 fflush(stdout);
6388 fflush(stderr);
6389 }
6390
/*
 * Free every indirect sub-table hanging off cpu->opcodes.  The tables
 * form a tree up to three levels deep; each level is scanned for
 * tagged (indirect) entries, which are untagged and freed bottom-up.
 *
 * NOTE(review): the frees mask with ~PPC_INDIRECT (bit 0 only) while
 * ind_table() masks with ~PPC_OPCODE_MASK (bits 0-1).  Identical in
 * practice because only PPC_INDIRECT is ever OR'ed into a slot, but
 * the two masks should arguably agree.
 */
void destroy_ppc_opcodes(PowerPCCPU *cpu)
{
    opc_handler_t **table, **table_2;
    int i, j, k;

    for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
        if (cpu->opcodes[i] == &invalid_handler) {
            continue;
        }
        if (is_indirect_opcode(cpu->opcodes[i])) {
            table = ind_table(cpu->opcodes[i]);
            for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
                if (table[j] == &invalid_handler) {
                    continue;
                }
                if (is_indirect_opcode(table[j])) {
                    table_2 = ind_table(table[j]);
                    /* Third level: free any leaf sub-tables first. */
                    for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
                        if (table_2[k] != &invalid_handler &&
                            is_indirect_opcode(table_2[k])) {
                            g_free((opc_handler_t *)((uintptr_t)table_2[k] &
                                ~PPC_INDIRECT));
                        }
                    }
                    g_free((opc_handler_t *)((uintptr_t)table[j] &
                        ~PPC_INDIRECT));
                }
            }
            g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
                ~PPC_INDIRECT));
        }
    }
}
6424
/*
 * Trim the CPU's instruction-set flags down to what TCG can emulate,
 * warning about anything that gets disabled.  Always returns 0.
 */
int ppc_fixup_cpu(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /*
     * TCG doesn't (yet) emulate some groups of instructions that are
     * implemented on some otherwise supported CPUs (e.g. VSX and
     * decimal floating point instructions on POWER7). We remove
     * unsupported instruction groups from the cpu state's instruction
     * masks and hope the guest can cope. For at least the pseries
     * machine, the unavailability of these instructions can be
     * advertised to the guest via the device tree.
     */
    if ((env->insns_flags & ~PPC_TCG_INSNS)
        || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
        warn_report("Disabling some instructions which are not "
                    "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
                    env->insns_flags & ~PPC_TCG_INSNS,
                    env->insns_flags2 & ~PPC_TCG_INSNS2);
    }
    env->insns_flags &= PPC_TCG_INSNS;
    env->insns_flags2 &= PPC_TCG_INSNS2;
    return 0;
}
6449
/*
 * Decode @insn via the legacy opcode tables: walk up to four levels of
 * indirect tables keyed by opc1..opc4, validate the instruction's
 * reserved bits against the handler's inval mask, then emit code.
 * Returns false when the opcode is invalid/unsupported or has invalid
 * bits set, true once the handler has been run.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;
    uint32_t sub[3];
    int level;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Descend through the tables while the current entry is indirect. */
    sub[0] = opc2(insn);
    sub[1] = opc3(insn);
    sub[2] = opc4(insn);
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    for (level = 0; level < 3 && is_indirect_opcode(handler); level++) {
        table = ind_table(handler);
        handler = table[sub[level]];
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions with Rc set use a separate invalid-bits mask. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
6503
/*
 * TranslatorOps.init_disas_context hook: unpack the TB's hflags word
 * and CPU state into the DisasContext fields consulted while
 * translating this TB.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cpu_env(cs);
    uint32_t hflags = ctx->base.tb->flags;

    /* MMU / privilege state */
    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->excp_model = env->excp_model;
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
    ctx->has_bhrb = !!(env->flags & POWERPC_FLAG_BHRB);
#endif
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    /* Facility-enable bits */
    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    /* PMU-related state */
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;
    ctx->bhrb_enable = (hflags >> HFLAGS_BHRB_ENABLE) & 1;

    /* Single stepping forces one instruction per TB. */
    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
6554
/* TranslatorOps.tb_start hook: no per-TB setup needed for PowerPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6558
/* TranslatorOps.insn_start hook: record the guest PC of this insn. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
6563
/*
 * True if @insn is the prefix word of a prefixed instruction (primary
 * opcode 1, introduced in ISA v3.1).
 * NOTE(review): REQUIRE_INSNS_FLAGS2 presumably short-circuits this
 * function (treating the word as non-prefix) on pre-v3.1 CPUs --
 * confirm against the macro's definition.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
6569
/*
 * TranslatorOps.translate_insn hook: fetch and translate one
 * instruction (one word, or two words for a prefixed instruction),
 * advancing base.pc_next past what was consumed.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cpu_env(cs);
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* Remember the address of the current instruction. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* Plain word: try the decodetree decoder, then the legacy
         * opcode tables. */
        ctx->opcode = insn;
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Prefixed instruction: fetch the suffix word and hand the
         * combined 64-bit value (prefix in the high half) to the
         * 64-bit decoder. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
6614
/*
 * TranslatorOps.tb_stop hook: emit the TB's exit sequence according to
 * how translation ended (is_jmp), honoring single-step mode.
 */
static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    target_ulong nip = ctx->base.pc_next;

    if (is_jmp == DISAS_NORETURN) {
        /* We have already exited the TB. */
        return;
    }

    /* Honor single stepping. */
    if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
        bool rfi_type = false;

        switch (is_jmp) {
        case DISAS_TOO_MANY:
        case DISAS_EXIT_UPDATE:
        case DISAS_CHAIN_UPDATE:
            gen_update_nip(ctx, nip);
            break;
        case DISAS_EXIT:
        case DISAS_CHAIN:
            /*
             * This is a heuristic, to put it kindly. The rfi class of
             * instructions are among the few outside branches that change
             * NIP without taking an interrupt. Single step trace interrupts
             * do not fire on completion of these instructions.
             */
            rfi_type = true;
            break;
        default:
            g_assert_not_reached();
        }

        gen_debug_exception(ctx, rfi_type);
        return;
    }

    switch (is_jmp) {
    case DISAS_TOO_MANY:
        /* Sequential fall-off: chain directly to the next TB if possible. */
        if (use_goto_tb(ctx, nip)) {
            pmu_count_insns(ctx);
            tcg_gen_goto_tb(0);
            gen_update_nip(ctx, nip);
            tcg_gen_exit_tb(ctx->base.tb, 0);
            break;
        }
        /* fall through */
    case DISAS_CHAIN_UPDATE:
        gen_update_nip(ctx, nip);
        /* fall through */
    case DISAS_CHAIN:
        /*
         * tcg_gen_lookup_and_goto_ptr will exit the TB if
         * CF_NO_GOTO_PTR is set. Count insns now.
         *
         * NOTE(review): CF_NO_GOTO_PTR looks like a cflags bit; testing
         * it against tb->flags (target hflags) here seems suspect --
         * confirm whether tb_cflags(ctx->base.tb) was intended.
         */
        if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
            pmu_count_insns(ctx);
        }

        tcg_gen_lookup_and_goto_ptr();
        break;

    case DISAS_EXIT_UPDATE:
        gen_update_nip(ctx, nip);
        /* fall through */
    case DISAS_EXIT:
        /* Full exit back to the main loop. */
        pmu_count_insns(ctx);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
6691
/* Callback table driving the generic translator loop for PowerPC. */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
};
6699
/*
 * Entry point for TB translation: run the generic translator loop with
 * the PowerPC callback table over guest code at @pc.
 */
void ppc_translate_code(CPUState *cs, TranslationBlock *tb,
                        int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &dc.base);
}
6707