xref: /openbmc/qemu/target/ppc/translate.c (revision c5d98a7b)
1 /*
2  *  PowerPC emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (C) 2011 Freescale Semiconductor, Inc.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 #include "qemu/main-loop.h"
30 #include "exec/cpu_ldst.h"
31 
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34 
35 #include "exec/translator.h"
36 #include "exec/log.h"
37 #include "qemu/atomic128.h"
38 #include "spr_common.h"
39 #include "power8-pmu.h"
40 
41 #include "qemu/qemu-print.h"
42 #include "qapi/error.h"
43 
44 #define HELPER_H "helper.h"
45 #include "exec/helper-info.c.inc"
46 #undef  HELPER_H
47 
48 #define CPU_SINGLE_STEP 0x1
49 #define CPU_BRANCH_STEP 0x2
50 
51 /* Include definitions for instructions classes and implementations flags */
52 /* #define PPC_DEBUG_DISAS */
53 
54 #ifdef PPC_DEBUG_DISAS
55 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
56 #else
57 #  define LOG_DISAS(...) do { } while (0)
58 #endif
59 /*****************************************************************************/
60 /* Code translation helpers                                                  */
61 
/* global register indexes */
/*
 * Backing storage for the register names handed to tcg_global_mem_new*();
 * TCG keeps only pointers into this buffer, so it must outlive translation.
 * Sizes count the trailing NUL: "r0".."r9" = 3, "r10".."r31" = 4, etc.
 */
static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5           /* CRF */];
static TCGv cpu_gpr[32];     /* general purpose registers */
static TCGv cpu_gprh[32];    /* upper GPR halves (SPE) */
static TCGv_i32 cpu_crf[8];  /* condition register fields */
static TCGv cpu_nip;         /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
/* XER is kept split: residual bits plus individual SO/OV/CA(/32) flags */
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;         /* load-reservation address */
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
static TCGv cpu_reserve_val2;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
83 
/*
 * Register the fixed CPUPPCState fields as TCG globals.  Called once at
 * start-up.  The printable names are packed back to back in
 * cpu_reg_names[]; the hard-coded pointer increments below must match the
 * formatted lengths including the terminating NUL ("crf0" -> 5,
 * "r0" -> 3, "r10" -> 4, "r0H" -> 4, "r10H" -> 5).
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields crf0..crf7 */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs r0..r31 and their SPE upper halves r0H..r31H */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* Split XER representation: residual xer plus individual flags */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    /* Load-reservation state for lwarx/stwcx. and friends */
    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(cpu_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
165 
/* internal defines */
/* Per-translation state for the PowerPC front end. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;   /* instruction word being translated */
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;  /* MSR PR/HV/DR state and endianness */
    bool lazy_tlb_flush;
    bool need_access_type;     /* whether cpu_access_type must be maintained */
    int mem_idx;               /* MMU index for memory accesses */
    int access_type;           /* last value written to cpu_access_type */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;              /* 64-bit mode; see NARROW_MODE() */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;    /* CPU_SINGLE_STEP / CPU_BRANCH_STEP bits */
    uint32_t flags;            /* POWERPC_FLAG_* for this CPU model */
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
201 
/* Front-end specific is_jmp values (continuations of DISAS_TARGET_*) */
#define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
#define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
#define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
#define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */

/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
    /* Swap whenever the guest's current endianness differs from the
       target's compile-time default endianness. */
#if TARGET_BIG_ENDIAN
     return ctx->le_mode;
#else
     return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long.  */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0
#endif
223 
/* Decode-table entry describing one opcode's constraints and emitter. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
236 
237 static inline bool gen_serialize(DisasContext *ctx)
238 {
239     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
240         /* Restart with exclusive lock.  */
241         gen_helper_exit_atomic(cpu_env);
242         ctx->base.is_jmp = DISAS_NORETURN;
243         return false;
244     }
245     return true;
246 }
247 
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* As gen_serialize(), but only needed when the core runs multiple threads. */
static inline bool gen_serialize_core(DisasContext *ctx)
{
    return !(ctx->flags & POWERPC_FLAG_SMT) || gen_serialize(ctx);
}
#endif
258 
/* SPR load/store helpers */
/* Copy SPR 'reg' from env into TCG value 't'. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value 't' into SPR 'reg' in env. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
269 
270 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
271 {
272     if (ctx->need_access_type && ctx->access_type != access_type) {
273         tcg_gen_movi_i32(cpu_access_type, access_type);
274         ctx->access_type = access_type;
275     }
276 }
277 
278 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
279 {
280     if (NARROW_MODE(ctx)) {
281         nip = (uint32_t)nip;
282     }
283     tcg_gen_movi_tl(cpu_nip, nip);
284 }
285 
286 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
287 {
288     TCGv_i32 t0, t1;
289 
290     /*
291      * These are all synchronous exceptions, we set the PC back to the
292      * faulting instruction
293      */
294     gen_update_nip(ctx, ctx->cia);
295     t0 = tcg_constant_i32(excp);
296     t1 = tcg_constant_i32(error);
297     gen_helper_raise_exception_err(cpu_env, t0, t1);
298     ctx->base.is_jmp = DISAS_NORETURN;
299 }
300 
301 static void gen_exception(DisasContext *ctx, uint32_t excp)
302 {
303     TCGv_i32 t0;
304 
305     /*
306      * These are all synchronous exceptions, we set the PC back to the
307      * faulting instruction
308      */
309     gen_update_nip(ctx, ctx->cia);
310     t0 = tcg_constant_i32(excp);
311     gen_helper_raise_exception(cpu_env, t0);
312     ctx->base.is_jmp = DISAS_NORETURN;
313 }
314 
315 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
316                               target_ulong nip)
317 {
318     TCGv_i32 t0;
319 
320     gen_update_nip(ctx, nip);
321     t0 = tcg_constant_i32(excp);
322     gen_helper_raise_exception(cpu_env, t0);
323     ctx->base.is_jmp = DISAS_NORETURN;
324 }
325 
#if !defined(CONFIG_USER_ONLY)
/* Re-evaluate pending interrupts via helper (I/O access for the translator). */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(cpu_env);
}
#endif
333 
334 /*
335  * Tells the caller what is the appropriate exception to generate and prepares
336  * SPR registers for this exception.
337  *
338  * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
339  * POWERPC_EXCP_DEBUG (on BookE).
340  */
341 static uint32_t gen_prep_dbgex(DisasContext *ctx)
342 {
343     if (ctx->flags & POWERPC_FLAG_DE) {
344         target_ulong dbsr = 0;
345         if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
346             dbsr = DBCR0_ICMP;
347         } else {
348             /* Must have been branch */
349             dbsr = DBCR0_BRT;
350         }
351         TCGv t0 = tcg_temp_new();
352         gen_load_spr(t0, SPR_BOOKE_DBSR);
353         tcg_gen_ori_tl(t0, t0, dbsr);
354         gen_store_spr(SPR_BOOKE_DBSR, t0);
355         return POWERPC_EXCP_DEBUG;
356     } else {
357         return POWERPC_EXCP_TRACE;
358     }
359 }
360 
361 static void gen_debug_exception(DisasContext *ctx)
362 {
363     gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
364     ctx->base.is_jmp = DISAS_NORETURN;
365 }
366 
/* Raise an invalid-instruction fault carrying 'error' detail bits. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privilege-violation program check carrying 'error' detail bits. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* As gen_priv_exception(), but via the HV emulation-assist path. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
383 
/*****************************************************************************/
/* SPR READ/WRITE CALLBACKS */

/* Default callback for inaccessible SPRs: silently ignore the access. */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
394 
/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */
/* Optionally trace an SPR read (no-op unless PPC_DUMP_SPR_ACCESSES). */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
#endif
}

/* Read an SPR straight from env into a GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

/* Optionally trace an SPR write (no-op unless PPC_DUMP_SPR_ACCESSES). */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
#endif
}

/* Write a GPR straight into an SPR in env. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
428 
429 void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
430 {
431 #ifdef TARGET_PPC64
432     TCGv t0 = tcg_temp_new();
433     tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
434     gen_store_spr(sprn, t0);
435     spr_store_dump_spr(sprn);
436 #else
437     spr_write_generic(ctx, sprn, gprn);
438 #endif
439 }
440 
441 static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
442 {
443     /* This does not implement >1 thread */
444     TCGv t0 = tcg_temp_new();
445     TCGv t1 = tcg_temp_new();
446     tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
447     tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
448     tcg_gen_or_tl(t1, t1, t0);
449     gen_store_spr(sprn, t1);
450 }
451 
/*
 * Write CTRL.  Single-thread configurations take the inline ST path;
 * SMT configurations must serialize against the sibling threads and do
 * the write in a helper.  Either way the TB is ended afterwards.
 */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_CTRL_ST(ctx, sprn, gprn);
        goto out;
    }

    if (!gen_serialize(ctx)) {
        /* Translation aborted; retried under the exclusive lock. */
        return;
    }

    gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
                              cpu_gpr[gprn]);
out:
    spr_store_dump_spr(sprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
475 
#if !defined(CONFIG_USER_ONLY)
/*
 * SPR write where set bits in the source clear SPR bits.
 * NOTE(review): tcg_gen_neg_tl() computes the arithmetic negation (-x),
 * not the bitwise complement (~x).  For a write-one-to-clear semantic the
 * expected form would be tcg_gen_andc_tl(t0, t0, cpu_gpr[gprn]) (i.e.
 * spr &= ~gpr) -- confirm the intended semantics before relying on this.
 */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
}

/* Deliberate no-op access callback. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}

#endif
492 
493 /* SPR common to all PowerPC */
494 /* XER */
495 void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
496 {
497     TCGv dst = cpu_gpr[gprn];
498     TCGv t0 = tcg_temp_new();
499     TCGv t1 = tcg_temp_new();
500     TCGv t2 = tcg_temp_new();
501     tcg_gen_mov_tl(dst, cpu_xer);
502     tcg_gen_shli_tl(t0, cpu_so, XER_SO);
503     tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
504     tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
505     tcg_gen_or_tl(t0, t0, t1);
506     tcg_gen_or_tl(dst, dst, t2);
507     tcg_gen_or_tl(dst, dst, t0);
508     if (is_isa300(ctx)) {
509         tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
510         tcg_gen_or_tl(dst, dst, t0);
511         tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
512         tcg_gen_or_tl(dst, dst, t0);
513     }
514 }
515 
516 void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
517 {
518     TCGv src = cpu_gpr[gprn];
519     /* Write all flags, while reading back check for isa300 */
520     tcg_gen_andi_tl(cpu_xer, src,
521                     ~((1u << XER_SO) |
522                       (1u << XER_OV) | (1u << XER_OV32) |
523                       (1u << XER_CA) | (1u << XER_CA32)));
524     tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
525     tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
526     tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
527     tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
528     tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
529 }
530 
/* LR */
/* Read the link register into a GPR. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

/* Write the link register from a GPR. */
void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}

/* CFAR */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Read CFAR into a GPR. */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

/* Write CFAR from a GPR. */
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* CTR */
/* Read the count register into a GPR. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

/* Write the count register from a GPR. */
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}

/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* Read the privileged SPR shadowed by this user SPR (number + 0x10). */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Write the privileged SPR shadowed by this user SPR (number + 0x10). */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
583 
/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
/* Read the decrementer (clock-driven, hence the I/O marker). */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

/* Write the decrementer. */
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
#endif
599 
/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
/* Read the low half of the time base (clock-driven, hence I/O markers). */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

/* Read the high half of the time base. */
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

/* Read the low half of the alternate time base. */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

/* Read the high half of the alternate time base. */
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}

#if !defined(CONFIG_USER_ONLY)
/* Write the low half of the time base. */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

/* Write the high half of the time base. */
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

/* Write the low half of the alternate time base. */
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

/* Write the high half of the alternate time base. */
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* Read PURR (processor utilisation of resources register). */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

/* Write PURR. */
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
/* Read the hypervisor decrementer. */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

/* Write the hypervisor decrementer. */
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

/* Read the virtual time base. */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

/* Write the virtual time base. */
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

/* Write the upper 40 bits of the time base (TBU40). */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}

#endif
#endif
693 
694 #if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/*
 * BAT accessors: the low SPR-number bit selects the upper/lower half of
 * the pair, and (sprn - base) / 2 yields the BAT index; the "_h" variants
 * cover the second group of four BATs (index offset +4).
 */
/* Read IBAT0..3 upper/lower. */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

/* Read IBAT4..7 upper/lower. */
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

/* Write IBAT0..3 upper (helper also updates derived MMU state). */
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write IBAT4..7 upper. */
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write IBAT0..3 lower. */
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write IBAT4..7 lower. */
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
/* Read DBAT0..3 upper/lower. */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

/* Read DBAT4..7 upper/lower. */
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

/* Write DBAT0..3 upper. */
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write DBAT4..7 upper. */
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write DBAT0..3 lower. */
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* Write DBAT4..7 lower. */
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* SDR1 */
/* Write SDR1 through its helper. */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}
780 
#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
/* Write PIDR through its helper. */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

/* Write LPIDR through its helper. */
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}

/* Read HIOR: returns the exception prefix kept in env. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

/* Write HIOR: only bits 0x3FFFFF00000 of the new prefix are kept. */
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
}
/* Write PTCR through its helper. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

/* Write PCR through its helper. */
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}

/* DPDES */
/* Read the directed privileged doorbell exception state. */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

/* Write the directed privileged doorbell exception state. */
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
#endif
826 #endif
827 
/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
/* Read the 40x programmable interval timer (clock-driven). */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

/* Write the 40x programmable interval timer. */
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

/* Write 40x DBCR0 and end the TB (the helper may reset the machine). */
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

/* Write the 40x SLER through its helper. */
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

/* Write the 40x timer control register. */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

/* Write the 40x timer status register. */
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

/* Write the 40x PID; only the low 8 bits are kept. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
}

/* Write the BookE timer control register. */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

/* Write the BookE timer status register. */
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
#endif
888 
/* PIR */
#if !defined(CONFIG_USER_ONLY)
/* Write PIR; only the low 4 bits are settable. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
#endif
898 
/* SPE specific registers */
/* Read SPEFSCR (a 32-bit env field) into a GPR, zero-extended. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

/* Write the low 32 bits of a GPR to SPEFSCR. */
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
}
913 
#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* Only bits permitted by ivpr_mask are settable. */
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}

/* Write a BookE IVORn exception vector offset. */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    /* Map the three discontiguous IVOR SPR ranges onto vector indices. */
    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    /* Only bits permitted by ivor_mask are settable. */
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
#endif
949 
#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
/* Write AMR, with writable bits limited by UAMOR (problem state) or AMOR. */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}

/* Write UAMOR, with writable bits limited by AMOR. */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}

/* Write IAMR, with writable bits limited by AMOR. */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
#endif
#endif
1038 
1039 #ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    /* Let the helper refresh the THRM SPR value before it is read */
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1046 #endif /* !CONFIG_USER_ONLY */
1047 
1048 #if !defined(CONFIG_USER_ONLY)
/* e500 L1CSR0: only the DCE and CPE bits are writable */
void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
    gen_store_spr(sprn, t0);
}

/* e500 L1CSR1: only the ICE and CPE bits are writable */
void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
    gen_store_spr(sprn, t0);
}

/* e500 L2CSR0: the flash-invalidate/flush/lock-clear bits always read 0 */
void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_andi_tl(t0, cpu_gpr[gprn],
                    ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
    gen_store_spr(sprn, t0);
}

/* BookE 2.06 MMUCSR0 write: triggers a TLB flush in the helper */
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
}

/* BookE PID write: helper needs the SPR number to tell PID0/1/2 apart */
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
}

/* External PID load context */
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}

/* External PID store context */
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
}
1094 
1095 #endif
1096 
1097 #if !defined(CONFIG_USER_ONLY)
1098 void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
1099 {
1100     TCGv val = tcg_temp_new();
1101     tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
1102     gen_store_spr(SPR_BOOKE_MAS3, val);
1103     tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
1104     gen_store_spr(SPR_BOOKE_MAS7, val);
1105 }
1106 
1107 void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
1108 {
1109     TCGv mas7 = tcg_temp_new();
1110     TCGv mas3 = tcg_temp_new();
1111     gen_load_spr(mas7, SPR_BOOKE_MAS7);
1112     tcg_gen_shli_tl(mas7, mas7, 32);
1113     gen_load_spr(mas3, SPR_BOOKE_MAS3);
1114     tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
1115 }
1116 
1117 #endif
1118 
1119 #ifdef TARGET_PPC64
1120 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1121                                     int bit, int sprn, int cause)
1122 {
1123     TCGv_i32 t1 = tcg_constant_i32(bit);
1124     TCGv_i32 t2 = tcg_constant_i32(sprn);
1125     TCGv_i32 t3 = tcg_constant_i32(cause);
1126 
1127     gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
1128 }
1129 
1130 static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
1131                                    int bit, int sprn, int cause)
1132 {
1133     TCGv_i32 t1 = tcg_constant_i32(bit);
1134     TCGv_i32 t2 = tcg_constant_i32(sprn);
1135     TCGv_i32 t3 = tcg_constant_i32(cause);
1136 
1137     gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
1138 }
1139 
1140 void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
1141 {
1142     TCGv spr_up = tcg_temp_new();
1143     TCGv spr = tcg_temp_new();
1144 
1145     gen_load_spr(spr, sprn - 1);
1146     tcg_gen_shri_tl(spr_up, spr, 32);
1147     tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
1148 }
1149 
1150 void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
1151 {
1152     TCGv spr = tcg_temp_new();
1153 
1154     gen_load_spr(spr, sprn - 1);
1155     tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
1156     gen_store_spr(sprn - 1, spr);
1157 }
1158 
1159 #if !defined(CONFIG_USER_ONLY)
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    /*
     * HMER is write-ANDed: the new value is the old value ANDed with
     * the source GPR, so software can only clear bits, never set them.
     */
    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
}
1169 
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    /* LPCR writes go through a helper (filtering/side effects at runtime) */
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
1174 #endif /* !defined(CONFIG_USER_ONLY) */
1175 
/* TAR access: guarded by FSCR[TAR], otherwise a plain SPR access */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}

/* TM SPR access: guarded by MSR[TM] */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}

/* 32-bit TM SPR aliases of the upper half of SPR sprn - 1 */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

/* EBB SPR access: guarded by FSCR[EBB] */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}

/* 32-bit EBB SPR aliases of the upper half of SPR sprn - 1 */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1235 
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    /* +16 maps the problem-state alias onto the privileged SPR number */
    gen_load_spr(t0, sprn + 16);
    /* Problem state only sees the low 32 bits */
    tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
}
1252 #endif
1253 
/* Convenience wrappers around the GEN_OPCODE* table-entry initializers */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)

/* One entry of the opcode table built by the GEN_OPCODE* macros */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;  /* primary/extended opcode fields */
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;  /* invalid-bit masks, type flags, translate cb */
    const char *oname;      /* opcode name, for logging/diagnostics */
} opcode_t;
1280 
/* Raise a privileged-opcode program interrupt */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1285 
/* Helpers for priv. check */
/* Raise the privileged-opcode exception and bail out of the translator */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User mode is never privileged: every check fails unconditionally */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Require hypervisor state (HV=1, PR=0) */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Require supervisor state (PR=0) */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Require hypervisor real mode (HV=1, PR=0, DR=0) */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check at all */
#define CHK_NONE(CTX)
1318 
1319 /*****************************************************************************/
1320 /* PowerPC instructions table                                                */
1321 
/* 3-level opcode entry; oname derived from the handler name */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with two invalid-bit masks (dual-form insns) */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with an explicit oname string */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
/* 4-level opcode entry (opc4 used); oname derived from the handler name */
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* 4-level opcode entry with an explicit oname string */
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1393 
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    /* Default handler for undecodable opcodes: raise an invalid-insn trap */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
1399 
/* Catch-all table entry: every bit is "invalid", handler traps */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1407 
1408 /***                           Integer comparison                          ***/
1409 
1410 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
1411 {
1412     TCGv t0 = tcg_temp_new();
1413     TCGv t1 = tcg_temp_new();
1414     TCGv_i32 t = tcg_temp_new_i32();
1415 
1416     tcg_gen_movi_tl(t0, CRF_EQ);
1417     tcg_gen_movi_tl(t1, CRF_LT);
1418     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
1419                        t0, arg0, arg1, t1, t0);
1420     tcg_gen_movi_tl(t1, CRF_GT);
1421     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
1422                        t0, arg0, arg1, t1, t0);
1423 
1424     tcg_gen_trunc_tl_i32(t, t0);
1425     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
1426     tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
1427 }
1428 
/* Compare arg0 against an immediate (signed if s != 0) into CR field crf */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
}
1434 
1435 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
1436 {
1437     TCGv t0, t1;
1438     t0 = tcg_temp_new();
1439     t1 = tcg_temp_new();
1440     if (s) {
1441         tcg_gen_ext32s_tl(t0, arg0);
1442         tcg_gen_ext32s_tl(t1, arg1);
1443     } else {
1444         tcg_gen_ext32u_tl(t0, arg0);
1445         tcg_gen_ext32u_tl(t1, arg1);
1446     }
1447     gen_op_cmp(t0, t1, s, crf);
1448 }
1449 
/* 32-bit compare of arg0 against an immediate into CR field crf */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_constant_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
}

/* Set CR0 from a signed compare of reg against 0 (Rc=1 semantics) */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
1464 
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 = byte under test; src2lo/src2hi = first range bounds */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* crf = (src2lo <= src1) && (src1 <= src2hi) */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    /* L bit set: also test against the second range in the upper bytes */
    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* Result goes into the GT bit of the CR field */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}
1498 
#if defined(TARGET_PPC64)
/* cmpeqb */
static void gen_cmpeqb(DisasContext *ctx)
{
    /* Byte-equality search is done entirely in the helper */
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
#endif
1507 
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    /* Select the tested bit within the 4-bit CR field */
    uint32_t mask = 0x08 >> (bi & 0x03);
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    /* rD = CR bit set ? rA (or 0 when rA == 0) : rB */
    zr = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
}
1524 
/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    /* Per-byte compare of rS and rB, result mask into rA (helper-based) */
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1531 
1532 /***                           Integer arithmetic                          ***/
1533 
/*
 * Compute OV (and OV32 on ISA v3.00) for arg0 = arg1 +/- arg2, and
 * accumulate into SO.  "sub" selects subtraction semantics.
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    /* Overflow iff result sign differs from both/neither operand signs */
    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the overflow bit is bit 31 */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        /* 64-bit mode: OV32 from bit 31, OV from the top bit */
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
1559 
/*
 * Compute the carry out of bit 32 (CA32, ISA v3.00 only) for
 * res = arg0 +/- arg1.  No-op on pre-3.00 CPUs.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    /* Carry into bit 32 = bit 32 of (arg0 ^ arg1 ^ res), inverted for sub */
    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1579 
1580 /* Common add function */
1581 static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
1582                                     TCGv arg2, TCGv ca, TCGv ca32,
1583                                     bool add_ca, bool compute_ca,
1584                                     bool compute_ov, bool compute_rc0)
1585 {
1586     TCGv t0 = ret;
1587 
1588     if (compute_ca || compute_ov) {
1589         t0 = tcg_temp_new();
1590     }
1591 
1592     if (compute_ca) {
1593         if (NARROW_MODE(ctx)) {
1594             /*
1595              * Caution: a non-obvious corner case of the spec is that
1596              * we must produce the *entire* 64-bit addition, but
1597              * produce the carry into bit 32.
1598              */
1599             TCGv t1 = tcg_temp_new();
1600             tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
1601             tcg_gen_add_tl(t0, arg1, arg2);
1602             if (add_ca) {
1603                 tcg_gen_add_tl(t0, t0, ca);
1604             }
1605             tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
1606             tcg_gen_extract_tl(ca, ca, 32, 1);
1607             if (is_isa300(ctx)) {
1608                 tcg_gen_mov_tl(ca32, ca);
1609             }
1610         } else {
1611             TCGv zero = tcg_constant_tl(0);
1612             if (add_ca) {
1613                 tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
1614                 tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
1615             } else {
1616                 tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
1617             }
1618             gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
1619         }
1620     } else {
1621         tcg_gen_add_tl(t0, arg1, arg2);
1622         if (add_ca) {
1623             tcg_gen_add_tl(t0, t0, ca);
1624         }
1625     }
1626 
1627     if (compute_ov) {
1628         gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
1629     }
1630     if (unlikely(compute_rc0)) {
1631         gen_set_Rc0(ctx, t0);
1632     }
1633 
1634     if (t0 != ret) {
1635         tcg_gen_mov_tl(ret, t0);
1636     }
1637 }
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}

/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo.  */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex: note that OV (not CA) is used as the carry in/out */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
/* addic  addic.*/
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    /* rD = rA + SIMM, always computing CA; CR0 update per compute_rc0 */
    TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
}

/* addic: no CR0 update */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

/* addic.: records CR0 */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1693 
/*
 * 32-bit divide: ret = arg1 / arg2 (signed when sign != 0).
 * Division by zero and INT_MIN / -1 are detected up front (t2 flags
 * them); the divisor is then forced to the non-zero flag value so the
 * host division cannot trap, and OV is set from the flag if requested.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = (arg1 == INT_MIN && arg2 == -1) || arg2 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = (arg2 == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        /* OV (and OV32 on ISA v3.00) = invalid-divide flag */
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu  divwu.  divwuo  divwuo.   */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo.   */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] */
/* Extended divides go through helpers; compute_ov selects OV update */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1764 
1765 #if defined(TARGET_PPC64)
/*
 * Emit a 64-bit divide: ret = arg1 / arg2 (signed when 'sign' is set).
 * Division-by-zero and the signed INT64_MIN / -1 overflow case are made
 * safe by forcing the divisor to 0... actually by replacing the divisor
 * with t2 (which is 1 in the faulting cases), so the host-level TCG
 * divide can never trap; t2 doubles as the overflow flag afterwards.
 * When compute_ov is set, OV (and OV32 on ISA v3.0) and SO are updated.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Copy operands so ret may alias arg1/arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (dividend == INT64_MIN && divisor == -1) || divisor == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* On the faulting cases substitute divisor t2 (== 1) for t1. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = (divisor == 0); substitute a safe divisor as above. */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1803 
/* Expand one 64-bit divide insn on top of gen_op_arith_divd. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo.   */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo.   */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

/* divdeu  divdeu.  divdeuo  divdeuo.  */
GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
/* divde  divde.  divdeo  divdeo.  */
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1822 #endif
1823 
/*
 * Emit a 32-bit modulo: ret = arg1 % arg2 (signed when 'sign' is set),
 * with the result extended back to target length.  As with the divide
 * helpers, the divisor is substituted on divide-by-zero and on the
 * signed INT_MIN % -1 case so the host TCG op cannot trap; the result
 * in those cases is whatever the substituted divisor produces (boundedly
 * undefined per the ISA).
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = (t0 == INT_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* Replace the divisor with t2 (== 1) on the faulting cases. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        /* Unsigned: only divisor == 0 needs patching (to 1). */
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}
1852 
/* Expand one 32-bit modulo insn on top of gen_op_arith_modw. */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
static void glue(gen_, name)(DisasContext *ctx)                             \
{                                                                           \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
                      sign);                                                \
}

/* moduw: modulo unsigned word */
GEN_INT_ARITH_MODW(moduw, 0x08, 0);
/* modsw: modulo signed word */
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1863 
1864 #if defined(TARGET_PPC64)
/*
 * Emit a 64-bit modulo: ret = arg1 % arg2 (signed when 'sign' is set).
 * Divide-by-zero and signed INT64_MIN % -1 are patched with a safe
 * divisor so the host TCG op cannot trap; the ISA leaves the result of
 * those cases undefined.
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Copy operands so ret may alias arg1/arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = (t0 == INT64_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* Replace the divisor with t2 (== 1) on the faulting cases. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        /* Unsigned: only divisor == 0 needs patching (to 1). */
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}
1891 
1892 #define GEN_INT_ARITH_MODD(name, opc3, sign)                            \
1893 static void glue(gen_, name)(DisasContext *ctx)                           \
1894 {                                                                         \
1895   gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1896                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1897                     sign);                                                \
1898 }
1899 
1900 GEN_INT_ARITH_MODD(modud, 0x08, 0);
1901 GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1902 #endif
1903 
1904 /* mulhw  mulhw. */
1905 static void gen_mulhw(DisasContext *ctx)
1906 {
1907     TCGv_i32 t0 = tcg_temp_new_i32();
1908     TCGv_i32 t1 = tcg_temp_new_i32();
1909 
1910     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1911     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1912     tcg_gen_muls2_i32(t0, t1, t0, t1);
1913     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1914     if (unlikely(Rc(ctx->opcode) != 0)) {
1915         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1916     }
1917 }
1918 
1919 /* mulhwu  mulhwu.  */
1920 static void gen_mulhwu(DisasContext *ctx)
1921 {
1922     TCGv_i32 t0 = tcg_temp_new_i32();
1923     TCGv_i32 t1 = tcg_temp_new_i32();
1924 
1925     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1926     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1927     tcg_gen_mulu2_i32(t0, t1, t0, t1);
1928     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1929     if (unlikely(Rc(ctx->opcode) != 0)) {
1930         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1931     }
1932 }
1933 
/* mullw  mullw. - multiply low word */
static void gen_mullw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /*
     * 64-bit target: write the full 64-bit product of the sign-extended
     * low words into rD (NOTE(review): the ISA leaves rD's upper 32 bits
     * undefined in 64-bit mode, so writing the full product is allowed).
     */
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1952 
/* mullwo  mullwo. - multiply low word, updating OV/SO */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* t1:t0 = full signed 64-bit product */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV iff the high word is not the sign-extension of the low word. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1980 
1981 /* mulli */
1982 static void gen_mulli(DisasContext *ctx)
1983 {
1984     tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1985                     SIMM(ctx->opcode));
1986 }
1987 
1988 #if defined(TARGET_PPC64)
1989 /* mulhd  mulhd. */
1990 static void gen_mulhd(DisasContext *ctx)
1991 {
1992     TCGv lo = tcg_temp_new();
1993     tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
1994                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
1995     if (unlikely(Rc(ctx->opcode) != 0)) {
1996         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1997     }
1998 }
1999 
2000 /* mulhdu  mulhdu. */
2001 static void gen_mulhdu(DisasContext *ctx)
2002 {
2003     TCGv lo = tcg_temp_new();
2004     tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2005                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2006     if (unlikely(Rc(ctx->opcode) != 0)) {
2007         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2008     }
2009 }
2010 
2011 /* mulld  mulld. */
2012 static void gen_mulld(DisasContext *ctx)
2013 {
2014     tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2015                    cpu_gpr[rB(ctx->opcode)]);
2016     if (unlikely(Rc(ctx->opcode) != 0)) {
2017         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2018     }
2019 }
2020 
/* mulldo  mulldo. - multiply low doubleword, updating OV/SO */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* t1:t0 = full signed 128-bit product */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the high half differs from the sign-extension of the low. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2042 #endif
2043 
/*
 * Common subf function: ret = ~arg1 + arg2 [+ CA] (i.e. arg2 - arg1).
 *   add_ca      - add the incoming CA bit (subfe/subfme/subfze forms)
 *   compute_ca  - update CA (and CA32 on ISA v3.0)
 *   compute_ov  - update OV/OV32/SO
 *   compute_rc0 - set CR0 (Rc=1 forms)
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Work in a temp when flags are computed, so ret may alias args. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Full-width: two chained add2 ops to accumulate the carry. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* Plain subtract: CA is set when no borrow occurs. */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Subtract-from insns with two register operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Subtract-from insns with one register operand and one constant */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo.  */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2146 
2147 /* subfic */
2148 static void gen_subfic(DisasContext *ctx)
2149 {
2150     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
2151     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2152                       c, 0, 1, 0, 0);
2153 }
2154 
2155 /* neg neg. nego nego. */
2156 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2157 {
2158     TCGv zero = tcg_constant_tl(0);
2159     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2160                       zero, 0, 0, compute_ov, Rc(ctx->opcode));
2161 }
2162 
2163 static void gen_neg(DisasContext *ctx)
2164 {
2165     tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2166     if (unlikely(Rc(ctx->opcode))) {
2167         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2168     }
2169 }
2170 
/* nego: negate with OV/SO update (Rc handling is in gen_op_arith_neg) */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
2175 
2176 /***                            Integer logical                            ***/
/*
 * Logical insns with two source registers: rA = rS <op> rB,
 * optionally setting CR0 for the Rc=1 ("dot") form.
 */
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
           cpu_gpr[rB(ctx->opcode)]);                                         \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}
2185 
/*
 * Logical insns with one source register: rA = <op>(rS),
 * optionally setting CR0 for the Rc=1 ("dot") form.
 */
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
    }                                                                         \
}
2193 
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. (AND with complement: rA = rS & ~rB) */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2198 
2199 /* andi. */
2200 static void gen_andi_(DisasContext *ctx)
2201 {
2202     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2203                     UIMM(ctx->opcode));
2204     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2205 }
2206 
2207 /* andis. */
2208 static void gen_andis_(DisasContext *ctx)
2209 {
2210     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2211                     UIMM(ctx->opcode) << 16);
2212     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2213 }
2214 
2215 /* cntlzw */
2216 static void gen_cntlzw(DisasContext *ctx)
2217 {
2218     TCGv_i32 t = tcg_temp_new_i32();
2219 
2220     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2221     tcg_gen_clzi_i32(t, t, 32);
2222     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2223 
2224     if (unlikely(Rc(ctx->opcode) != 0)) {
2225         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2226     }
2227 }
2228 
2229 /* cnttzw */
2230 static void gen_cnttzw(DisasContext *ctx)
2231 {
2232     TCGv_i32 t = tcg_temp_new_i32();
2233 
2234     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2235     tcg_gen_ctzi_i32(t, t, 32);
2236     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2237 
2238     if (unlikely(Rc(ctx->opcode) != 0)) {
2239         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2240     }
2241 }
2242 
/* eqv & eqv. (equivalence: rA = ~(rS ^ rB)) */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. (sign-extend byte) */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. (sign-extend halfword) */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2253 
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Exit the TB via EXCP_HLT so other vCPUs get a chance to run.
 * The store writes 0 to cpu->halted, reached with a negative offset
 * from env back to the enclosing CPUState (NOTE(review): presumably so
 * the vCPU is not left halted and resumes at the next instruction —
 * confirm against gen_exception_nip/EXCP_HLT handling).
 */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2265 
/*
 * or & or. - also decodes the "or rx,rx,rx" no-op encodings that act
 * as thread-priority hints (PPR) on 64-bit implementations.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr / mr. (OR with identical sources) case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        /* "or rx,rx,rx": rx selects the priority hint. */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Rewrite the 3-bit priority field of SPR_PPR (bits 52:50). */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. (OR with complement: rA = rS | ~rB) */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2353 
2354 /* xor & xor. */
2355 static void gen_xor(DisasContext *ctx)
2356 {
2357     /* Optimisation for "set to zero" case */
2358     if (rS(ctx->opcode) != rB(ctx->opcode)) {
2359         tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2360                        cpu_gpr[rB(ctx->opcode)]);
2361     } else {
2362         tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2363     }
2364     if (unlikely(Rc(ctx->opcode) != 0)) {
2365         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2366     }
2367 }
2368 
/* ori */
static void gen_ori(DisasContext *ctx)
{
    target_ulong uimm = UIMM(ctx->opcode);

    if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
        /* NOP */
        return;
    }
    tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
}
2379 
2380 /* oris */
2381 static void gen_oris(DisasContext *ctx)
2382 {
2383     target_ulong uimm = UIMM(ctx->opcode);
2384 
2385     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2386         /* NOP */
2387         return;
2388     }
2389     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2390                    uimm << 16);
2391 }
2392 
2393 /* xori */
2394 static void gen_xori(DisasContext *ctx)
2395 {
2396     target_ulong uimm = UIMM(ctx->opcode);
2397 
2398     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2399         /* NOP */
2400         return;
2401     }
2402     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2403 }
2404 
2405 /* xoris */
2406 static void gen_xoris(DisasContext *ctx)
2407 {
2408     target_ulong uimm = UIMM(ctx->opcode);
2409 
2410     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2411         /* NOP */
2412         return;
2413     }
2414     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2415                     uimm << 16);
2416 }
2417 
/* popcntb : PowerPC 2.03 specification (per-byte population count) */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2423 
/* popcntw: per-word population count */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* 64-bit: helper counts bits within each 32-bit word separately. */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* 32-bit: the register is a single word; plain ctpop suffices. */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2432 
2433 #if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification (whole-doubleword popcount) */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2439 #endif
2440 
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold the bytes of each word down onto its low byte. */
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Keep only bit 0 of each 32-bit word: the per-word parity. */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
}
2453 
2454 #if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold all eight bytes down onto the low byte. */
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Bit 0 now holds the parity of the doubleword's byte LSBs. */
    tcg_gen_andi_tl(ra, ra, 1);
}
2469 #endif
2470 
2471 #if defined(TARGET_PPC64)
/* bpermd - bit permute doubleword (gathering handled in the helper) */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
2478 #endif
2479 
2480 #if defined(TARGET_PPC64)
/* extsw & extsw. (sign-extend word to doubleword) */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2483 
2484 /* cntlzd */
2485 static void gen_cntlzd(DisasContext *ctx)
2486 {
2487     tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2488     if (unlikely(Rc(ctx->opcode) != 0)) {
2489         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2490     }
2491 }
2492 
2493 /* cnttzd */
2494 static void gen_cnttzd(DisasContext *ctx)
2495 {
2496     tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2497     if (unlikely(Rc(ctx->opcode) != 0)) {
2498         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2499     }
2500 }
2501 
/* darn - deliver a random number; the L field selects the format */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    /* L > 2 is a reserved encoding: return the error value -1. */
    if (l > 2) {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        translator_io_start(&ctx->base);
        if (l == 0) {
            /* L == 0: 32-bit conditioned random number */
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
2519 #endif
2520 
2521 /***                             Integer rotate                            ***/
2522 
/*
 * rlwimi & rlwimi. - rotate left word immediate then mask insert:
 * rA = (rotl32(rS, SH) & MASK(MB,ME)) | (rA & ~MASK(MB,ME))
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Fast path: the rotate+mask is exactly a bit-field deposit. */
    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* PPC64 MASK() uses 64-bit bit numbering; word bits are 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* A wrapping (mb > me) word mask sets bits above the low word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* Rotate in 32 bits, then zero-extend. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word so a 64-bit rotate acts like rotl32. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge rotated source under the mask with the old rA. */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2573 
/*
 * rlwinm & rlwinm. - rotate left word immediate then AND with mask:
 * rA = rotl32(rS, SH) & MASK(MB,ME)
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    /* Fast paths: shift-left-and-mask and extract-field forms. */
    if (sh != 0 && len > 0 && me == (31 - sh)) {
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* PPC64 MASK() uses 64-bit bit numbering; word bits are 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* A wrapping (mb > me) word mask sets bits above the low word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                /* Rotate in 32 bits, mask, then zero-extend. */
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Replicate the word so a 64-bit rotate acts like rotl32. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2626 
/*
 * rlwnm & rlwnm. - rotate left word by register amount then AND with mask:
 * rA = rotl32(rS, rB & 0x1f) & MASK(MB,ME)
 */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* PPC64 MASK() uses 64-bit bit numbering; word bits are 32..63. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* A wrapping (mb > me) word mask sets bits above the low word. */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* Rotate in 32 bits, then zero-extend. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Replicate the word so a 64-bit rotate acts like rotl32. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2674 
#if defined(TARGET_PPC64)
/*
 * The 64-bit rotate instructions split their shift/mask fields across
 * several opcode variants.  GEN_PPC64_R2 expands the two variants
 * selected by one extra opcode bit, GEN_PPC64_R4 the four variants
 * selected by two extra bits.  Each generated stub forwards the bit
 * value(s) to a common gen_<name>() implementation.  The opc1/opc2
 * arguments are not used by the expansion itself.
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2706 
2707 static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
2708 {
2709     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2710     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2711     int len = me - mb + 1;
2712     int rsh = (64 - sh) & 63;
2713 
2714     if (sh != 0 && len > 0 && me == (63 - sh)) {
2715         tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
2716     } else if (me == 63 && rsh + len <= 64) {
2717         tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
2718     } else {
2719         tcg_gen_rotli_tl(t_ra, t_rs, sh);
2720         tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2721     }
2722     if (unlikely(Rc(ctx->opcode) != 0)) {
2723         gen_set_Rc0(ctx, t_ra);
2724     }
2725 }
2726 
2727 /* rldicl - rldicl. */
2728 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2729 {
2730     uint32_t sh, mb;
2731 
2732     sh = SH(ctx->opcode) | (shn << 5);
2733     mb = MB(ctx->opcode) | (mbn << 5);
2734     gen_rldinm(ctx, mb, 63, sh);
2735 }
2736 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2737 
2738 /* rldicr - rldicr. */
2739 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2740 {
2741     uint32_t sh, me;
2742 
2743     sh = SH(ctx->opcode) | (shn << 5);
2744     me = MB(ctx->opcode) | (men << 5);
2745     gen_rldinm(ctx, 0, me, sh);
2746 }
2747 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2748 
2749 /* rldic - rldic. */
2750 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2751 {
2752     uint32_t sh, mb;
2753 
2754     sh = SH(ctx->opcode) | (shn << 5);
2755     mb = MB(ctx->opcode) | (mbn << 5);
2756     gen_rldinm(ctx, mb, 63 - sh, sh);
2757 }
2758 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2759 
2760 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2761 {
2762     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2763     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2764     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2765     TCGv t0;
2766 
2767     t0 = tcg_temp_new();
2768     tcg_gen_andi_tl(t0, t_rb, 0x3f);
2769     tcg_gen_rotl_tl(t_ra, t_rs, t0);
2770 
2771     tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2772     if (unlikely(Rc(ctx->opcode) != 0)) {
2773         gen_set_Rc0(ctx, t_ra);
2774     }
2775 }
2776 
2777 /* rldcl - rldcl. */
2778 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2779 {
2780     uint32_t mb;
2781 
2782     mb = MB(ctx->opcode) | (mbn << 5);
2783     gen_rldnm(ctx, mb, 63);
2784 }
2785 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2786 
2787 /* rldcr - rldcr. */
2788 static inline void gen_rldcr(DisasContext *ctx, int men)
2789 {
2790     uint32_t me;
2791 
2792     me = MB(ctx->opcode) | (men << 5);
2793     gen_rldnm(ctx, 0, me);
2794 }
2795 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2796 
2797 /* rldimi - rldimi. */
2798 static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
2799 {
2800     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2801     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2802     uint32_t sh = SH(ctx->opcode) | (shn << 5);
2803     uint32_t mb = MB(ctx->opcode) | (mbn << 5);
2804     uint32_t me = 63 - sh;
2805 
2806     if (mb <= me) {
2807         tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
2808     } else {
2809         target_ulong mask = MASK(mb, me);
2810         TCGv t1 = tcg_temp_new();
2811 
2812         tcg_gen_rotli_tl(t1, t_rs, sh);
2813         tcg_gen_andi_tl(t1, t1, mask);
2814         tcg_gen_andi_tl(t_ra, t_ra, ~mask);
2815         tcg_gen_or_tl(t_ra, t_ra, t1);
2816     }
2817     if (unlikely(Rc(ctx->opcode) != 0)) {
2818         gen_set_Rc0(ctx, t_ra);
2819     }
2820 }
2821 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2822 #endif
2823 
2824 /***                             Integer shift                             ***/
2825 
2826 /* slw & slw. */
2827 static void gen_slw(DisasContext *ctx)
2828 {
2829     TCGv t0, t1;
2830 
2831     t0 = tcg_temp_new();
2832     /* AND rS with a mask that is 0 when rB >= 0x20 */
2833 #if defined(TARGET_PPC64)
2834     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2835     tcg_gen_sari_tl(t0, t0, 0x3f);
2836 #else
2837     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2838     tcg_gen_sari_tl(t0, t0, 0x1f);
2839 #endif
2840     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2841     t1 = tcg_temp_new();
2842     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2843     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2844     tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2845     if (unlikely(Rc(ctx->opcode) != 0)) {
2846         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2847     }
2848 }
2849 
2850 /* sraw & sraw. */
2851 static void gen_sraw(DisasContext *ctx)
2852 {
2853     gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2854                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2855     if (unlikely(Rc(ctx->opcode) != 0)) {
2856         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2857     }
2858 }
2859 
/* srawi & srawi. - shift right algebraic word immediate */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend the low word; CA is cleared. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /*
         * CA is set iff the source is negative AND any 1 bits are
         * shifted out: collect the low sh bits, AND with the broadcast
         * sign bit, then reduce to 0/1.
         */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        /* Shift last: the CA computation above needs the pre-shift value. */
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2889 
2890 /* srw & srw. */
2891 static void gen_srw(DisasContext *ctx)
2892 {
2893     TCGv t0, t1;
2894 
2895     t0 = tcg_temp_new();
2896     /* AND rS with a mask that is 0 when rB >= 0x20 */
2897 #if defined(TARGET_PPC64)
2898     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2899     tcg_gen_sari_tl(t0, t0, 0x3f);
2900 #else
2901     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2902     tcg_gen_sari_tl(t0, t0, 0x1f);
2903 #endif
2904     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2905     tcg_gen_ext32u_tl(t0, t0);
2906     t1 = tcg_temp_new();
2907     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2908     tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2909     if (unlikely(Rc(ctx->opcode) != 0)) {
2910         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2911     }
2912 }
2913 
2914 #if defined(TARGET_PPC64)
2915 /* sld & sld. */
2916 static void gen_sld(DisasContext *ctx)
2917 {
2918     TCGv t0, t1;
2919 
2920     t0 = tcg_temp_new();
2921     /* AND rS with a mask that is 0 when rB >= 0x40 */
2922     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2923     tcg_gen_sari_tl(t0, t0, 0x3f);
2924     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2925     t1 = tcg_temp_new();
2926     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2927     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2928     if (unlikely(Rc(ctx->opcode) != 0)) {
2929         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2930     }
2931 }
2932 
2933 /* srad & srad. */
2934 static void gen_srad(DisasContext *ctx)
2935 {
2936     gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2937                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2938     if (unlikely(Rc(ctx->opcode) != 0)) {
2939         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2940     }
2941 }
/* sradi & sradi. - shift right algebraic doubleword immediate */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    /* "n" supplies the high bit of the split 6-bit shift count. */
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: plain copy; CA is cleared. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /*
         * CA is set iff the source is negative AND any 1 bits are
         * shifted out: collect the low sh bits, AND with the broadcast
         * sign bit, then reduce to 0/1.
         */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2970 
/* sradi with sh[5] (the high shift bit) clear. */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}
2975 
/* sradi with sh[5] (the high shift bit) set. */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
2980 
2981 /* extswsli & extswsli. */
2982 static inline void gen_extswsli(DisasContext *ctx, int n)
2983 {
2984     int sh = SH(ctx->opcode) + (n << 5);
2985     TCGv dst = cpu_gpr[rA(ctx->opcode)];
2986     TCGv src = cpu_gpr[rS(ctx->opcode)];
2987 
2988     tcg_gen_ext32s_tl(dst, src);
2989     tcg_gen_shli_tl(dst, dst, sh);
2990     if (unlikely(Rc(ctx->opcode) != 0)) {
2991         gen_set_Rc0(ctx, dst);
2992     }
2993 }
2994 
/* extswsli with sh[5] (the high shift bit) clear. */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}
2999 
/* extswsli with sh[5] (the high shift bit) set. */
static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
3004 
3005 /* srd & srd. */
3006 static void gen_srd(DisasContext *ctx)
3007 {
3008     TCGv t0, t1;
3009 
3010     t0 = tcg_temp_new();
3011     /* AND rS with a mask that is 0 when rB >= 0x40 */
3012     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
3013     tcg_gen_sari_tl(t0, t0, 0x3f);
3014     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
3015     t1 = tcg_temp_new();
3016     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
3017     tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
3018     if (unlikely(Rc(ctx->opcode) != 0)) {
3019         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3020     }
3021 }
3022 #endif
3023 
3024 /***                           Addressing modes                            ***/
3025 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
3026 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
3027                                       target_long maskl)
3028 {
3029     target_long simm = SIMM(ctx->opcode);
3030 
3031     simm &= ~maskl;
3032     if (rA(ctx->opcode) == 0) {
3033         if (NARROW_MODE(ctx)) {
3034             simm = (uint32_t)simm;
3035         }
3036         tcg_gen_movi_tl(EA, simm);
3037     } else if (likely(simm != 0)) {
3038         tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
3039         if (NARROW_MODE(ctx)) {
3040             tcg_gen_ext32u_tl(EA, EA);
3041         }
3042     } else {
3043         if (NARROW_MODE(ctx)) {
3044             tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3045         } else {
3046             tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3047         }
3048     }
3049 }
3050 
3051 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3052 {
3053     if (rA(ctx->opcode) == 0) {
3054         if (NARROW_MODE(ctx)) {
3055             tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3056         } else {
3057             tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3058         }
3059     } else {
3060         tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3061         if (NARROW_MODE(ctx)) {
3062             tcg_gen_ext32u_tl(EA, EA);
3063         }
3064     }
3065 }
3066 
3067 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3068 {
3069     if (rA(ctx->opcode) == 0) {
3070         tcg_gen_movi_tl(EA, 0);
3071     } else if (NARROW_MODE(ctx)) {
3072         tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3073     } else {
3074         tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3075     }
3076 }
3077 
/* ret = arg1 + val, truncated to 32 bits in narrow (32-bit) mode. */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
3086 
/*
 * Raise an alignment interrupt flagging the little-endian cause, with
 * the instruction's register fields folded into the error code.
 */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
3092 
3093 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3094 {
3095     TCGv ea = tcg_temp_new();
3096     if (ra) {
3097         tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3098     } else {
3099         tcg_gen_mov_tl(ea, displ);
3100     }
3101     if (NARROW_MODE(ctx)) {
3102         tcg_gen_ext32u_tl(ea, ea);
3103     }
3104     return ea;
3105 }
3106 
3107 /***                             Integer load                              ***/
3108 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
3109 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
3110 
3111 #define GEN_QEMU_LOAD_TL(ldop, op)                                      \
3112 static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
3113                                   TCGv val,                             \
3114                                   TCGv addr)                            \
3115 {                                                                       \
3116     tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
3117 }
3118 
3119 GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
3120 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
3121 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
3122 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
3123 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
3124 
3125 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
3126 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
3127 
3128 #define GEN_QEMU_LOAD_64(ldop, op)                                  \
3129 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
3130                                              TCGv_i64 val,          \
3131                                              TCGv addr)             \
3132 {                                                                   \
3133     tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
3134 }
3135 
3136 GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
3137 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
3138 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
3139 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
3140 GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))
3141 
3142 #if defined(TARGET_PPC64)
3143 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
3144 #endif
3145 
/* Expand gen_qemu_<stop>(): store a target-long value to addr. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed variants. */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

/* Expand gen_qemu_<stop>_i64(): store a 64-bit value to addr. */
#define GEN_QEMU_STORE_64(stop, op)                               \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
                                              TCGv_i64 val,       \
                                              TCGv addr)          \
{                                                                 \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
3179 
/*
 * Expand gen_<name>x(): indexed load into rD, after running the "chk"
 * permission macro (CHK_NONE / CHK_HVRM / ...).  opc2/opc3/type/type2
 * are carried for the opcode tables, not used by the expansion.
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only loads (used for the cache-inhibited ops). */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Expand gen_<name>epx(): supervisor-only indexed load through the
 * PPC_TLB_EPID_LOAD memory index instead of ctx->mem_idx.
 */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
3222 
3223 /***                              Integer store                            ***/
3224 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
3225 static void glue(gen_, name##x)(DisasContext *ctx)                            \
3226 {                                                                             \
3227     TCGv EA;                                                                  \
3228     chk(ctx);                                                                 \
3229     gen_set_access_type(ctx, ACCESS_INT);                                     \
3230     EA = tcg_temp_new();                                                      \
3231     gen_addr_reg_index(ctx, EA);                                              \
3232     gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
3233 }
3234 #define GEN_STX(name, stop, opc2, opc3, type)                                 \
3235     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
3236 
3237 #define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
3238     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
3239 
3240 #define GEN_STEPX(name, stop, opc2, opc3)                                     \
3241 static void glue(gen_, name##epx)(DisasContext *ctx)                          \
3242 {                                                                             \
3243     TCGv EA;                                                                  \
3244     CHK_SV(ctx);                                                              \
3245     gen_set_access_type(ctx, ACCESS_INT);                                     \
3246     EA = tcg_temp_new();                                                      \
3247     gen_addr_reg_index(ctx, EA);                                              \
3248     tcg_gen_qemu_st_tl(                                                       \
3249         cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
3250 }
3251 
3252 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
3253 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
3254 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
3255 #if defined(TARGET_PPC64)
3256 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
3257 #endif
3258 
3259 #if defined(TARGET_PPC64)
3260 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
3261 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
3262 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
3263 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
3264 #endif
3265 /***                Integer load and store with byte reverse               ***/
3266 
3267 /* lhbrx */
3268 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3269 
3270 /* lwbrx */
3271 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
3272 
3273 #if defined(TARGET_PPC64)
3274 /* ldbrx */
3275 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
3276 /* stdbrx */
3277 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
3278 #endif  /* TARGET_PPC64 */
3279 
3280 /* sthbrx */
3281 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
3282 /* stwbrx */
3283 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3284 
3285 /***                    Integer load and store multiple                    ***/
3286 
3287 /* lmw */
3288 static void gen_lmw(DisasContext *ctx)
3289 {
3290     TCGv t0;
3291     TCGv_i32 t1;
3292 
3293     if (ctx->le_mode) {
3294         gen_align_no_le(ctx);
3295         return;
3296     }
3297     gen_set_access_type(ctx, ACCESS_INT);
3298     t0 = tcg_temp_new();
3299     t1 = tcg_constant_i32(rD(ctx->opcode));
3300     gen_addr_imm_index(ctx, t0, 0);
3301     gen_helper_lmw(cpu_env, t0, t1);
3302 }
3303 
3304 /* stmw */
3305 static void gen_stmw(DisasContext *ctx)
3306 {
3307     TCGv t0;
3308     TCGv_i32 t1;
3309 
3310     if (ctx->le_mode) {
3311         gen_align_no_le(ctx);
3312         return;
3313     }
3314     gen_set_access_type(ctx, ACCESS_INT);
3315     t0 = tcg_temp_new();
3316     t1 = tcg_constant_i32(rS(ctx->opcode));
3317     gen_addr_imm_index(ctx, t0, 0);
3318     gen_helper_stmw(cpu_env, t0, t1);
3319 }
3320 
3321 /***                    Integer load and store strings                     ***/
3322 
3323 /* lswi */
3324 /*
3325  * PowerPC32 specification says we must generate an exception if rA is
3326  * in the range of registers to be loaded.  In an other hand, IBM says
3327  * this is valid, but rA won't be loaded.  For now, I'll follow the
3328  * spec...
3329  */
3330 static void gen_lswi(DisasContext *ctx)
3331 {
3332     TCGv t0;
3333     TCGv_i32 t1, t2;
3334     int nb = NB(ctx->opcode);
3335     int start = rD(ctx->opcode);
3336     int ra = rA(ctx->opcode);
3337     int nr;
3338 
3339     if (ctx->le_mode) {
3340         gen_align_no_le(ctx);
3341         return;
3342     }
3343     if (nb == 0) {
3344         nb = 32;
3345     }
3346     nr = DIV_ROUND_UP(nb, 4);
3347     if (unlikely(lsw_reg_in_range(start, nr, ra))) {
3348         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
3349         return;
3350     }
3351     gen_set_access_type(ctx, ACCESS_INT);
3352     t0 = tcg_temp_new();
3353     gen_addr_register(ctx, t0);
3354     t1 = tcg_constant_i32(nb);
3355     t2 = tcg_constant_i32(start);
3356     gen_helper_lsw(cpu_env, t0, t1, t2);
3357 }
3358 
3359 /* lswx */
3360 static void gen_lswx(DisasContext *ctx)
3361 {
3362     TCGv t0;
3363     TCGv_i32 t1, t2, t3;
3364 
3365     if (ctx->le_mode) {
3366         gen_align_no_le(ctx);
3367         return;
3368     }
3369     gen_set_access_type(ctx, ACCESS_INT);
3370     t0 = tcg_temp_new();
3371     gen_addr_reg_index(ctx, t0);
3372     t1 = tcg_constant_i32(rD(ctx->opcode));
3373     t2 = tcg_constant_i32(rA(ctx->opcode));
3374     t3 = tcg_constant_i32(rB(ctx->opcode));
3375     gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3376 }
3377 
3378 /* stswi */
3379 static void gen_stswi(DisasContext *ctx)
3380 {
3381     TCGv t0;
3382     TCGv_i32 t1, t2;
3383     int nb = NB(ctx->opcode);
3384 
3385     if (ctx->le_mode) {
3386         gen_align_no_le(ctx);
3387         return;
3388     }
3389     gen_set_access_type(ctx, ACCESS_INT);
3390     t0 = tcg_temp_new();
3391     gen_addr_register(ctx, t0);
3392     if (nb == 0) {
3393         nb = 32;
3394     }
3395     t1 = tcg_constant_i32(nb);
3396     t2 = tcg_constant_i32(rS(ctx->opcode));
3397     gen_helper_stsw(cpu_env, t0, t1, t2);
3398 }
3399 
3400 /* stswx */
3401 static void gen_stswx(DisasContext *ctx)
3402 {
3403     TCGv t0;
3404     TCGv_i32 t1, t2;
3405 
3406     if (ctx->le_mode) {
3407         gen_align_no_le(ctx);
3408         return;
3409     }
3410     gen_set_access_type(ctx, ACCESS_INT);
3411     t0 = tcg_temp_new();
3412     gen_addr_reg_index(ctx, t0);
3413     t1 = tcg_temp_new_i32();
3414     tcg_gen_trunc_tl_i32(t1, cpu_xer);
3415     tcg_gen_andi_i32(t1, t1, 0x7F);
3416     t2 = tcg_constant_i32(rS(ctx->opcode));
3417     gen_helper_stsw(cpu_env, t0, t1, t2);
3418 }
3419 
3420 /***                        Memory synchronisation                         ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has an eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            /* Hinted variant: only store-load ordering is required. */
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3472 
#if !defined(CONFIG_USER_ONLY)
/*
 * Emit code that checks CPUPPCState.tlb_need_flush and, when non-zero,
 * calls the global or local flush helper.  No-op unless the CPU model
 * uses lazy TLB flushing (ctx->lazy_tlb_flush).
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    /* Branch around the helper call when no flush is pending. */
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
}
#else
/* User-mode emulation: no TLB state to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3496 
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* Full barrier, then end the TB so state changes take effect. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3510 
/* Size in bytes of the access described by MemOp "x". */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))
3512 
3513 static void gen_load_locked(DisasContext *ctx, MemOp memop)
3514 {
3515     TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3516     TCGv t0 = tcg_temp_new();
3517 
3518     gen_set_access_type(ctx, ACCESS_RES);
3519     gen_addr_reg_index(ctx, t0);
3520     tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
3521     tcg_gen_mov_tl(cpu_reserve, t0);
3522     tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
3523     tcg_gen_mov_tl(cpu_reserve_val, gpr);
3524 }
3525 
/* Expand a load-and-reserve instruction of the given width. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3536 
3537 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
3538                                       TCGv EA, TCGCond cond, int addend)
3539 {
3540     TCGv t = tcg_temp_new();
3541     TCGv t2 = tcg_temp_new();
3542     TCGv u = tcg_temp_new();
3543 
3544     tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
3545     tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
3546     tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
3547     tcg_gen_addi_tl(u, t, addend);
3548 
3549     /* E.g. for fetch and increment bounded... */
3550     /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
3551     tcg_gen_movcond_tl(cond, u, t, t2, u, t);
3552     tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
3553 
3554     /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
3555     tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
3556     tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
3557 }
3558 
/*
 * Emit code for the lwat/ldat atomic-load instructions.  The FC field
 * selects one of several read-modify-write operations on memory at the
 * address in RA, using RT+1 (and RT+2 for compare-and-swap-not-equal)
 * as source operands and returning the fetched value in RT.  Most FC
 * values map directly onto TCG atomic ops; the remaining ones
 * (FC 16/24/25/28) are emitted inline only when not translating for
 * parallel execution, and otherwise restart the instruction under the
 * exclusive lock via gen_helper_exit_atomic().
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        /* Inline (non-atomic) expansion is only safe single-threaded. */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* For 32-bit ops on a 64-bit target, compare only low 32 bits. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3655 
/* lwat: 32-bit atomic loads */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat: 64-bit atomic loads */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3667 
/*
 * Emit code for the stwat/stdat atomic-store instructions.  The FC
 * field selects a read-modify-write operation on memory at the address
 * in RA with RS as the source; the fetched value is discarded.  The
 * "store twin" form (FC 24) is emitted inline only when not translating
 * for parallel execution, and otherwise restarts under the exclusive
 * lock.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    /* The *_fetch TCG ops need a destination; its value is unused. */
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store RS to both words only if the two words are equal. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3730 
/* stwat: 32-bit atomic stores */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat: 64-bit atomic stores */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3742 
/*
 * Common body for the store-conditional instructions (stbcx./sthcx./
 * stwcx./stdcx.).  The store succeeds only if the effective address
 * and access width match the current reservation and the memory
 * location still holds the reserved value (checked with an atomic
 * cmpxchg).  CR0 is set to SO plus EQ on success; the reservation is
 * cleared in all cases.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *lfail;
    TCGv EA;
    TCGv cr0;
    TCGv t0;
    int rs = rS(ctx->opcode);

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();
    t0 = tcg_temp_new();

    /* Start CR0 as just the SO bit; EQ is or-ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    /* Fail fast if address or width does not match the reservation. */
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);

    /*
     * Store only if memory still holds the reserved value.  NOTE: a
     * location rewritten to the same value (ABA) also succeeds.
     */
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[rs], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Clear the reservation (no valid address can be -1). */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3773 
/*
 * Expand one store-conditional wrapper per access width; each is a
 * trivial call into gen_conditional_store() with the right MemOp.
 */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))
3789 
/*
 * lqarx: load quadword and reserve.  Loads an aligned 16-byte value
 * into the even/odd register pair RD/RD+1 and records a 16-byte
 * reservation.  RD must be even and must not equal RA or RB.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    /* Odd RD, or RD aliasing RA/RB, is an invalid form. */
    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    /* Record the 16-byte reservation for a later stqcx. */
    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3820 
/*
 * stqcx.: store quadword conditional.  16-byte analogue of
 * gen_conditional_store(): succeeds only if address and length match
 * the reservation and memory still holds the reserved 128-bit value
 * (atomic 128-bit cmpxchg).  CR0 = SO | (EQ on success); reservation
 * always cleared.  RS must be even.
 */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lfail;
    TCGv EA, t0, t1;
    TCGv cr0;
    TCGv_i128 cmp, val;
    int rs = rS(ctx->opcode);

    /* Odd RS is an invalid form. */
    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();

    /* Start CR0 as just the SO bit; EQ is or-ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode.  */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* Success iff the value read back equals the reserved value. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* Clear the reservation (no valid address can be -1). */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3872 #endif /* defined(TARGET_PPC64) */
3873 
/* sync / lwsync / ptesync (L field selects the variant) */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /* L == 1 is lwsync: a weaker barrier that excludes store->load. */
    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3898 
/*
 * wait: wait for interrupt.  Decoding depends on the ISA level; only
 * WC = 0 (wait for an exception/interrupt) actually halts the CPU, all
 * other architected WC values are implemented as no-ops (see the long
 * rationale at the end of the function).
 */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        TCGv_i32 t0 = tcg_constant_i32(1);
        /* Set cs->halted directly; env is embedded in the CPU struct. */
        tcg_gen_st_i32(t0, cpu_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, such could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
3987 
3988 #if defined(TARGET_PPC64)
3989 static void gen_doze(DisasContext *ctx)
3990 {
3991 #if defined(CONFIG_USER_ONLY)
3992     GEN_PRIV(ctx);
3993 #else
3994     TCGv_i32 t;
3995 
3996     CHK_HV(ctx);
3997     t = tcg_constant_i32(PPC_PM_DOZE);
3998     gen_helper_pminsn(cpu_env, t);
3999     /* Stop translation, as the CPU is supposed to sleep from now */
4000     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4001 #endif /* defined(CONFIG_USER_ONLY) */
4002 }
4003 
4004 static void gen_nap(DisasContext *ctx)
4005 {
4006 #if defined(CONFIG_USER_ONLY)
4007     GEN_PRIV(ctx);
4008 #else
4009     TCGv_i32 t;
4010 
4011     CHK_HV(ctx);
4012     t = tcg_constant_i32(PPC_PM_NAP);
4013     gen_helper_pminsn(cpu_env, t);
4014     /* Stop translation, as the CPU is supposed to sleep from now */
4015     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4016 #endif /* defined(CONFIG_USER_ONLY) */
4017 }
4018 
4019 static void gen_stop(DisasContext *ctx)
4020 {
4021 #if defined(CONFIG_USER_ONLY)
4022     GEN_PRIV(ctx);
4023 #else
4024     TCGv_i32 t;
4025 
4026     CHK_HV(ctx);
4027     t = tcg_constant_i32(PPC_PM_STOP);
4028     gen_helper_pminsn(cpu_env, t);
4029     /* Stop translation, as the CPU is supposed to sleep from now */
4030     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4031 #endif /* defined(CONFIG_USER_ONLY) */
4032 }
4033 
4034 static void gen_sleep(DisasContext *ctx)
4035 {
4036 #if defined(CONFIG_USER_ONLY)
4037     GEN_PRIV(ctx);
4038 #else
4039     TCGv_i32 t;
4040 
4041     CHK_HV(ctx);
4042     t = tcg_constant_i32(PPC_PM_SLEEP);
4043     gen_helper_pminsn(cpu_env, t);
4044     /* Stop translation, as the CPU is supposed to sleep from now */
4045     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4046 #endif /* defined(CONFIG_USER_ONLY) */
4047 }
4048 
4049 static void gen_rvwinkle(DisasContext *ctx)
4050 {
4051 #if defined(CONFIG_USER_ONLY)
4052     GEN_PRIV(ctx);
4053 #else
4054     TCGv_i32 t;
4055 
4056     CHK_HV(ctx);
4057     t = tcg_constant_i32(PPC_PM_RVWINKLE);
4058     gen_helper_pminsn(cpu_env, t);
4059     /* Stop translation, as the CPU is supposed to sleep from now */
4060     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4061 #endif /* defined(CONFIG_USER_ONLY) */
4062 }
4063 #endif /* #if defined(TARGET_PPC64) */
4064 
/* Record nip in the CFAR register, on CPU models that provide one (PPC64). */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4073 
#if defined(TARGET_PPC64)
/*
 * Add the number of instructions in the current TB to the PMU
 * instruction counters.  Called on every TB exit path; no-op when the
 * PMU is not counting instructions.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    translator_io_start(&ctx->base);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(cpu_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
    }
  #else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
  #endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* The PMU is a PPC64 (book3s) feature; nothing to count elsewhere. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4134 
/* Whether a direct (chained) TB jump to dest is permitted. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
4139 
4140 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4141 {
4142     if (unlikely(ctx->singlestep_enabled)) {
4143         gen_debug_exception(ctx);
4144     } else {
4145         /*
4146          * tcg_gen_lookup_and_goto_ptr will exit the TB if
4147          * CF_NO_GOTO_PTR is set. Count insns now.
4148          */
4149         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4150             pmu_count_insns(ctx);
4151         }
4152 
4153         tcg_gen_lookup_and_goto_ptr();
4154     }
4155 }
4156 
4157 /***                                Branch                                 ***/
/* Emit a jump to dest, direct-chained when possible. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    /* In 32-bit mode only the low 32 bits of the address matter. */
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        /* Direct chaining exits the TB: count PMU insns first. */
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
4173 
4174 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4175 {
4176     if (NARROW_MODE(ctx)) {
4177         nip = (uint32_t)nip;
4178     }
4179     tcg_gen_movi_tl(cpu_lr, nip);
4180 }
4181 
4182 /* b ba bl bla */
4183 static void gen_b(DisasContext *ctx)
4184 {
4185     target_ulong li, target;
4186 
4187     /* sign extend LI */
4188     li = LI(ctx->opcode);
4189     li = (li ^ 0x02000000) - 0x02000000;
4190     if (likely(AA(ctx->opcode) == 0)) {
4191         target = ctx->cia + li;
4192     } else {
4193         target = li;
4194     }
4195     if (LK(ctx->opcode)) {
4196         gen_setlr(ctx, ctx->base.pc_next);
4197     }
4198     gen_update_cfar(ctx, ctx->cia);
4199     gen_goto_tb(ctx, 0, target);
4200     ctx->base.is_jmp = DISAS_NORETURN;
4201 }
4202 
/* Branch-target selectors for gen_bcond(). */
#define BCOND_IM  0   /* immediate displacement (bc)    */
#define BCOND_LR  1   /* branch to LR           (bclr)  */
#define BCOND_CTR 2   /* branch to CTR          (bcctr) */
#define BCOND_TAR 3   /* branch to TAR          (bctar) */
4207 
/*
 * Common translation for the conditional branches (bc/bclr/bcctr/
 * bctar).  With BO bit 0x4 clear, CTR is decremented and tested; with
 * BO bit 0x10 clear, the CR bit selected by BI is tested.  Failing
 * either test branches to label l1, after which the fall-through path
 * is emitted (unless (BO & 0x14) == 0x14, i.e. "branch always").
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    /* Snapshot the target register first: LK may clobber LR below. */
    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* Test first, then decrement ("test and decrement"). */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal form: decrement CTR, then test it. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        /* BO bit 0x8 selects branch-if-true vs branch-if-false. */
        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Register-indirect branch: low 2 address bits are ignored. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4316 
/* bc: conditional branch, immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr: conditional branch to CTR */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4336 
4337 /***                      Condition register logical                       ***/
/*
 * Expand one CR-field logical instruction (crand, cror, ...).  CR bits
 * are stored four-per-field in cpu_crf[]; each source bit is shifted
 * into the bit position of the destination bit (sh may be negative,
 * meaning a left shift), the logical op is applied, and the single
 * result bit is merged back into the destination field, preserving its
 * other three bits.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}
4366 
/* crand */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);

/* mcrf: copy CR field crfS to crfD */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4389 
4390 /***                           System linkage                              ***/
4391 
/* rfi (supervisor only): return from interrupt */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(cpu_env);
    /* MSR/NIP changed: end the TB and resume from the restored state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4414 
4415 #if defined(TARGET_PPC64)
/* rfid (supervisor only): 64-bit return from interrupt */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(cpu_env);
    /* MSR/NIP may have changed: stop this TB and return to the main loop */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4429 
4430 #if !defined(CONFIG_USER_ONLY)
/* rfscv (supervisor only): return from system call vectored */
static void gen_rfscv(DisasContext *ctx)
{
/*
 * NOTE(review): this function is only compiled when CONFIG_USER_ONLY is
 * not defined (see the enclosing #if), so the GEN_PRIV branch below is
 * dead code kept for symmetry with the sibling handlers.
 */
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfscv(cpu_env);
    /* MSR/NIP may have changed: stop this TB and return to the main loop */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4444 #endif
4445 
/* hrfid (hypervisor only): return from hypervisor interrupt */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    gen_helper_hrfid(cpu_env);
    /* MSR/NIP may have changed: stop this TB and return to the main loop */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4457 #endif
4458 
4459 /* sc */
4460 #if defined(CONFIG_USER_ONLY)
4461 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4462 #else
4463 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4464 #define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
4465 #endif
4466 static void gen_sc(DisasContext *ctx)
4467 {
4468     uint32_t lev;
4469 
4470     /*
4471      * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
4472      * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
4473      * for Ultravisor which TCG does not support, so just ignore the top 6.
4474      */
4475     lev = (ctx->opcode >> 5) & 0x1;
4476     gen_exception_err(ctx, POWERPC_SYSCALL, lev);
4477 }
4478 
4479 #if defined(TARGET_PPC64)
4480 #if !defined(CONFIG_USER_ONLY)
4481 static void gen_scv(DisasContext *ctx)
4482 {
4483     uint32_t lev = (ctx->opcode >> 5) & 0x7F;
4484 
4485     /* Set the PC back to the faulting instruction. */
4486     gen_update_nip(ctx, ctx->cia);
4487     gen_helper_scv(cpu_env, tcg_constant_i32(lev));
4488 
4489     ctx->base.is_jmp = DISAS_NORETURN;
4490 }
4491 #endif
4492 #endif
4493 
4494 /***                                Trap                                   ***/
4495 
4496 /* Check for unconditional traps (always or never) */
4497 static bool check_unconditional_trap(DisasContext *ctx)
4498 {
4499     /* Trap never */
4500     if (TO(ctx->opcode) == 0) {
4501         return true;
4502     }
4503     /* Trap always */
4504     if (TO(ctx->opcode) == 31) {
4505         gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4506         return true;
4507     }
4508     return false;
4509 }
4510 
4511 /* tw */
4512 static void gen_tw(DisasContext *ctx)
4513 {
4514     TCGv_i32 t0;
4515 
4516     if (check_unconditional_trap(ctx)) {
4517         return;
4518     }
4519     t0 = tcg_constant_i32(TO(ctx->opcode));
4520     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4521                   t0);
4522 }
4523 
4524 /* twi */
4525 static void gen_twi(DisasContext *ctx)
4526 {
4527     TCGv t0;
4528     TCGv_i32 t1;
4529 
4530     if (check_unconditional_trap(ctx)) {
4531         return;
4532     }
4533     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4534     t1 = tcg_constant_i32(TO(ctx->opcode));
4535     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4536 }
4537 
4538 #if defined(TARGET_PPC64)
4539 /* td */
4540 static void gen_td(DisasContext *ctx)
4541 {
4542     TCGv_i32 t0;
4543 
4544     if (check_unconditional_trap(ctx)) {
4545         return;
4546     }
4547     t0 = tcg_constant_i32(TO(ctx->opcode));
4548     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4549                   t0);
4550 }
4551 
4552 /* tdi */
4553 static void gen_tdi(DisasContext *ctx)
4554 {
4555     TCGv t0;
4556     TCGv_i32 t1;
4557 
4558     if (check_unconditional_trap(ctx)) {
4559         return;
4560     }
4561     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4562     t1 = tcg_constant_i32(TO(ctx->opcode));
4563     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4564 }
4565 #endif
4566 
4567 /***                          Processor control                            ***/
4568 
/* mcrxr: move SO/OV/CA from XER into CR[crfD], then clear them in XER */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* Assemble the 4-bit CR field value SO:OV:CA:0 in dst */
    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    /* dst is loaded from CA first, so the shifts below can use it in place */
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The architecture clears the moved XER bits */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4589 
4590 #ifdef TARGET_PPC64
/* mcrxrx: move OV:OV32:CA:CA32 from XER into CR[crfD] (XER is unchanged) */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4608 #endif
4609 
4610 /* mfcr mfocrf */
4611 static void gen_mfcr(DisasContext *ctx)
4612 {
4613     uint32_t crm, crn;
4614 
4615     if (likely(ctx->opcode & 0x00100000)) {
4616         crm = CRM(ctx->opcode);
4617         if (likely(crm && ((crm & (crm - 1)) == 0))) {
4618             crn = ctz32(crm);
4619             tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4620             tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4621                             cpu_gpr[rD(ctx->opcode)], crn * 4);
4622         }
4623     } else {
4624         TCGv_i32 t0 = tcg_temp_new_i32();
4625         tcg_gen_mov_i32(t0, cpu_crf[0]);
4626         tcg_gen_shli_i32(t0, t0, 4);
4627         tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4628         tcg_gen_shli_i32(t0, t0, 4);
4629         tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4630         tcg_gen_shli_i32(t0, t0, 4);
4631         tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4632         tcg_gen_shli_i32(t0, t0, 4);
4633         tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4634         tcg_gen_shli_i32(t0, t0, 4);
4635         tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4636         tcg_gen_shli_i32(t0, t0, 4);
4637         tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4638         tcg_gen_shli_i32(t0, t0, 4);
4639         tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4640         tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4641     }
4642 }
4643 
/* mfmsr (supervisor only): copy the MSR into rD */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4650 
/*
 * mfspr: read SPR number SPR(opcode) into rD via the per-SPR callback
 * table, selecting the callback set that matches the current privilege
 * level.  Shared by the mfspr and mftb front ends.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    /* Pick the user / hypervisor / supervisor view of this SPR */
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4712 
/* mfspr front end; the real work happens in gen_op_mfspr() */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4717 
/* mftb: time base reads go through the same SPR callback table as mfspr */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4723 
4724 /* mtcrf mtocrf*/
4725 static void gen_mtcrf(DisasContext *ctx)
4726 {
4727     uint32_t crm, crn;
4728 
4729     crm = CRM(ctx->opcode);
4730     if (likely((ctx->opcode & 0x00100000))) {
4731         if (crm && ((crm & (crm - 1)) == 0)) {
4732             TCGv_i32 temp = tcg_temp_new_i32();
4733             crn = ctz32(crm);
4734             tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4735             tcg_gen_shri_i32(temp, temp, crn * 4);
4736             tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4737         }
4738     } else {
4739         TCGv_i32 temp = tcg_temp_new_i32();
4740         tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4741         for (crn = 0 ; crn < 8 ; crn++) {
4742             if (crm & (1 << crn)) {
4743                     tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4744                     tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4745             }
4746         }
4747     }
4748 }
4749 
4750 /* mtmsr */
4751 #if defined(TARGET_PPC64)
/* mtmsrd (supervisor only): write rS to the MSR, 64-bit form */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
4795 #endif /* defined(TARGET_PPC64) */
4796 
/* mtmsr (supervisor only): write the low 32 bits of rS to the MSR */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* Only the low 32 MSR bits are writable by this form */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}
4834 
/*
 * mtspr: write rS to SPR number SPR(opcode) via the per-SPR callback
 * table, selecting the callback set that matches the current privilege
 * level (mirror image of gen_op_mfspr).
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    /* Pick the user / hypervisor / supervisor view of this SPR */
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4891 
4892 #if defined(TARGET_PPC64)
/* setb: rD = -1 if CR[crf].LT, else 1 if CR[crf].GT, else 0 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /*
     * The CR field is a 4-bit value LT:GT:EQ:SO, so LT set means
     * value >= 8 and GT set means value >= 4.  First compute
     * t0 = (field >= 4), i.e. 1 when GT or LT; then override with -1
     * when the field is >= 8 (LT set).
     */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
4905 #endif
4906 
4907 /***                         Cache management                              ***/
4908 
4909 /* dcbf */
4910 static void gen_dcbf(DisasContext *ctx)
4911 {
4912     /* XXX: specification says this is treated as a load by the MMU */
4913     TCGv t0;
4914     gen_set_access_type(ctx, ACCESS_CACHE);
4915     t0 = tcg_temp_new();
4916     gen_addr_reg_index(ctx, t0);
4917     gen_qemu_ld8u(ctx, t0, t0);
4918 }
4919 
/* dcbfep (external PID dcbf): privileged, load via the EPID translation */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4931 
/* dcbi (Supervisor only): data cache block invalidate */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    /* Modelled as a load + store-back of one byte to trigger MMU checks */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
4950 
4951 /* dcdst */
4952 static void gen_dcbst(DisasContext *ctx)
4953 {
4954     /* XXX: specification say this is treated as a load by the MMU */
4955     TCGv t0;
4956     gen_set_access_type(ctx, ACCESS_CACHE);
4957     t0 = tcg_temp_new();
4958     gen_addr_reg_index(ctx, t0);
4959     gen_qemu_ld8u(ctx, t0, t0);
4960 }
4961 
4962 /* dcbstep (dcbstep External PID version) */
4963 static void gen_dcbstep(DisasContext *ctx)
4964 {
4965     /* XXX: specification say this is treated as a load by the MMU */
4966     TCGv t0;
4967     gen_set_access_type(ctx, ACCESS_CACHE);
4968     t0 = tcg_temp_new();
4969     gen_addr_reg_index(ctx, t0);
4970     tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
4971 }
4972 
/* dcbt: data cache block touch (prefetch hint) */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op: prefetch hints need no emulation.
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception.
     */
}
4982 
/* dcbtep: external PID data cache block touch (prefetch hint) */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op: prefetch hints need no emulation.
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception.
     */
}
4992 
/* dcbtst: data cache block touch for store (prefetch hint) */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op: prefetch hints need no emulation.
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception.
     */
}
5002 
/* dcbtstep: external PID data cache block touch for store (prefetch hint) */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op: prefetch hints need no emulation.
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception.
     */
}
5012 
/* dcbtls: data cache block touch and lock set */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache: report it via L1CSR0[CUL] */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}
5022 
/* dcblc: data cache block lock clear */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op (no lines are ever locked, see dcbtls).
     */
}
5030 
/* dcbz: zero a data cache block at the computed effective address */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* The helper decodes the opcode fields it needs from this mask itself */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
5043 
/* dcbzep: external PID dcbz — same shape as gen_dcbz, different helper */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* The helper decodes the opcode fields it needs from this mask itself */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
5056 
5057 /* dst / dstt */
5058 static void gen_dst(DisasContext *ctx)
5059 {
5060     if (rA(ctx->opcode) == 0) {
5061         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5062     } else {
5063         /* interpreted as no-op */
5064     }
5065 }
5066 
5067 /* dstst /dststt */
5068 static void gen_dstst(DisasContext *ctx)
5069 {
5070     if (rA(ctx->opcode) == 0) {
5071         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5072     } else {
5073         /* interpreted as no-op */
5074     }
5075 
5076 }
5077 
/* dss / dssall: data stream stop — nothing to do, streams are not emulated */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5083 
5084 /* icbi */
5085 static void gen_icbi(DisasContext *ctx)
5086 {
5087     TCGv t0;
5088     gen_set_access_type(ctx, ACCESS_CACHE);
5089     t0 = tcg_temp_new();
5090     gen_addr_reg_index(ctx, t0);
5091     gen_helper_icbi(cpu_env, t0);
5092 }
5093 
5094 /* icbiep */
5095 static void gen_icbiep(DisasContext *ctx)
5096 {
5097     TCGv t0;
5098     gen_set_access_type(ctx, ACCESS_CACHE);
5099     t0 = tcg_temp_new();
5100     gen_addr_reg_index(ctx, t0);
5101     gen_helper_icbiep(cpu_env, t0);
5102 }
5103 
/* Optional: */
/* dcba: data cache block allocate */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * Interpreted as a no-op.
     * XXX: specification says this is treated as a store by the MMU
     *      but does not generate any exception.
     */
}
5114 
5115 /***                    Segment register manipulation                      ***/
5116 /* Supervisor only: */
5117 
/* mfsr (supervisor only): read segment register SR(opcode) into rD */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5131 
/* mfsrin (supervisor only): read the segment register indexed by rB[28:31] */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* SR number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5146 
/* mtsr (supervisor only): write rS to segment register SR(opcode) */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5160 
/* mtsrin (supervisor only): write rS to the segment register indexed by rB[28:31] */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    /* SR number is the top 4 bits of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    /*
     * NOTE(review): the source is read via rD() where the 64-bit variant
     * uses rS(); both decode the same opcode bits, so this is harmless,
     * but rS() would match the instruction's definition.
     */
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5175 
5176 #if defined(TARGET_PPC64)
5177 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5178 
/* mfsr, PowerPC 64 SLB-bridge variant (supervisor only) */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5192 
/* mfsrin, PowerPC 64 SLB-bridge variant (supervisor only) */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* SR number is the top 4 bits (of the low word) of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5207 
/* mtsr, PowerPC 64 SLB-bridge variant (supervisor only) */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5221 
/* mtsrin, PowerPC 64 SLB-bridge variant (supervisor only) */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* SR number is the top 4 bits (of the low word) of rB */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5236 
5237 #endif /* defined(TARGET_PPC64) */
5238 
5239 /***                      Lookaside buffer management                      ***/
5240 /* Optional & supervisor only: */
5241 
/* tlbia (hypervisor only): invalidate all TLB entries */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif  /* defined(CONFIG_USER_ONLY) */
}
5253 
/* tlbsync: synchronize TLB invalidations across CPUs */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /*
     * On BookS, ptesync already performs the flush work, making tlbsync a
     * no-op for server CPUs; only BookE needs a deferred TLB flush check.
     */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5273 
5274 /***                              External control                         ***/
5275 /* Optional: */
5276 
5277 /* eciwx */
5278 static void gen_eciwx(DisasContext *ctx)
5279 {
5280     TCGv t0;
5281     /* Should check EAR[E] ! */
5282     gen_set_access_type(ctx, ACCESS_EXT);
5283     t0 = tcg_temp_new();
5284     gen_addr_reg_index(ctx, t0);
5285     tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5286                        DEF_MEMOP(MO_UL | MO_ALIGN));
5287 }
5288 
5289 /* ecowx */
5290 static void gen_ecowx(DisasContext *ctx)
5291 {
5292     TCGv t0;
5293     /* Should check EAR[E] ! */
5294     gen_set_access_type(ctx, ACCESS_EXT);
5295     t0 = tcg_temp_new();
5296     gen_addr_reg_index(ctx, t0);
5297     tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5298                        DEF_MEMOP(MO_UL | MO_ALIGN));
5299 }
5300 
5301 /* 602 - 603 - G2 TLB management */
5302 
/* tlbld (supervisor only): 6xx software TLB data-entry load from rB */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5313 
/* tlbli (supervisor only): 6xx software TLB instruction-entry load from rB */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5324 
5325 /* BookE specific instructions */
5326 
/* XXX: not implemented on 440 ? */
/* mfapidi: unimplemented — raises an invalid-instruction program check */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5333 
5334 /* XXX: not implemented on 440 ? */
5335 static void gen_tlbiva(DisasContext *ctx)
5336 {
5337 #if defined(CONFIG_USER_ONLY)
5338     GEN_PRIV(ctx);
5339 #else
5340     TCGv t0;
5341 
5342     CHK_SV(ctx);
5343     t0 = tcg_temp_new();
5344     gen_addr_reg_index(ctx, t0);
5345     gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5346 #endif /* defined(CONFIG_USER_ONLY) */
5347 }
5348 
5349 /* All 405 MAC instructions are translated here */
5350 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5351                                         int ra, int rb, int rt, int Rc)
5352 {
5353     TCGv t0, t1;
5354 
5355     t0 = tcg_temp_new();
5356     t1 = tcg_temp_new();
5357 
5358     switch (opc3 & 0x0D) {
5359     case 0x05:
5360         /* macchw    - macchw.    - macchwo   - macchwo.   */
5361         /* macchws   - macchws.   - macchwso  - macchwso.  */
5362         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
5363         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
5364         /* mulchw - mulchw. */
5365         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5366         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5367         tcg_gen_ext16s_tl(t1, t1);
5368         break;
5369     case 0x04:
5370         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
5371         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
5372         /* mulchwu - mulchwu. */
5373         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5374         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5375         tcg_gen_ext16u_tl(t1, t1);
5376         break;
5377     case 0x01:
5378         /* machhw    - machhw.    - machhwo   - machhwo.   */
5379         /* machhws   - machhws.   - machhwso  - machhwso.  */
5380         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
5381         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
5382         /* mulhhw - mulhhw. */
5383         tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5384         tcg_gen_ext16s_tl(t0, t0);
5385         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5386         tcg_gen_ext16s_tl(t1, t1);
5387         break;
5388     case 0x00:
5389         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
5390         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
5391         /* mulhhwu - mulhhwu. */
5392         tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5393         tcg_gen_ext16u_tl(t0, t0);
5394         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5395         tcg_gen_ext16u_tl(t1, t1);
5396         break;
5397     case 0x0D:
5398         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
5399         /* maclhws   - maclhws.   - maclhwso  - maclhwso.  */
5400         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
5401         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
5402         /* mullhw - mullhw. */
5403         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5404         tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5405         break;
5406     case 0x0C:
5407         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
5408         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
5409         /* mullhwu - mullhwu. */
5410         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5411         tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5412         break;
5413     }
5414     if (opc2 & 0x04) {
5415         /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5416         tcg_gen_mul_tl(t1, t0, t1);
5417         if (opc2 & 0x02) {
5418             /* nmultiply-and-accumulate (0x0E) */
5419             tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5420         } else {
5421             /* multiply-and-accumulate (0x0C) */
5422             tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5423         }
5424 
5425         if (opc3 & 0x12) {
5426             /* Check overflow and/or saturate */
5427             TCGLabel *l1 = gen_new_label();
5428 
5429             if (opc3 & 0x10) {
5430                 /* Start with XER OV disabled, the most likely case */
5431                 tcg_gen_movi_tl(cpu_ov, 0);
5432             }
5433             if (opc3 & 0x01) {
5434                 /* Signed */
5435                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5436                 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5437                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5438                 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5439                 if (opc3 & 0x02) {
5440                     /* Saturate */
5441                     tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5442                     tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5443                 }
5444             } else {
5445                 /* Unsigned */
5446                 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5447                 if (opc3 & 0x02) {
5448                     /* Saturate */
5449                     tcg_gen_movi_tl(t0, UINT32_MAX);
5450                 }
5451             }
5452             if (opc3 & 0x10) {
5453                 /* Check overflow */
5454                 tcg_gen_movi_tl(cpu_ov, 1);
5455                 tcg_gen_movi_tl(cpu_so, 1);
5456             }
5457             gen_set_label(l1);
5458             tcg_gen_mov_tl(cpu_gpr[rt], t0);
5459         }
5460     } else {
5461         tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5462     }
5463     if (unlikely(Rc) != 0) {
5464         /* Update Rc0 */
5465         gen_set_Rc0(ctx, cpu_gpr[rt]);
5466     }
5467 }
5468 
/*
 * Emit a gen_<name>() opcode handler that forwards to
 * gen_405_mulladd_insn() with fixed opc2/opc3 selectors and the
 * rA/rB/rD/Rc fields decoded from the instruction word.
 */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}
5475 
/*
 * PowerPC 405 multiply-accumulate / multiply-halfword handlers.
 * opc2 selects the family (0x0C: mac*, 0x0E: nmac*, 0x08: mul*) and
 * opc3 encodes the halfword selection, signedness, saturation and
 * overflow variants; see gen_405_mulladd_insn() for the decoding.
 */
/* macchw    - macchw.    */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo   - macchwo.   */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws   - macchws.   */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso  - macchwso.  */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu  - macchwsu.  */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu   - macchwu.   */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo  - macchwuo.  */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw    - machhw.    */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo   - machhwo.   */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws   - machhws.   */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso  - machhwso.  */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu  - machhwsu.  */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu   - machhwu.   */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo  - machhwuo.  */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw    - maclhw.    */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo   - maclhwo.   */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws   - maclhws.   */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso  - maclhwso.  */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu   - maclhwu.   */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo  - maclhwuo.  */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu  - maclhwsu.  */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw   - nmacchw.   */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo  - nmacchwo.  */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws  - nmacchws.  */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw   - nmachhw.   */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo  - nmachhwo.  */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws  - nmachhws.  */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw   - nmaclhw.   */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo  - nmaclhwo.  */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws  - nmaclhws.  */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw  - mulchw.  */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw  - mulhhw.  */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw  - mullhw.  */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5561 
5562 /* mfdcr */
5563 static void gen_mfdcr(DisasContext *ctx)
5564 {
5565 #if defined(CONFIG_USER_ONLY)
5566     GEN_PRIV(ctx);
5567 #else
5568     TCGv dcrn;
5569 
5570     CHK_SV(ctx);
5571     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5572     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
5573 #endif /* defined(CONFIG_USER_ONLY) */
5574 }
5575 
5576 /* mtdcr */
5577 static void gen_mtdcr(DisasContext *ctx)
5578 {
5579 #if defined(CONFIG_USER_ONLY)
5580     GEN_PRIV(ctx);
5581 #else
5582     TCGv dcrn;
5583 
5584     CHK_SV(ctx);
5585     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5586     gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
5587 #endif /* defined(CONFIG_USER_ONLY) */
5588 }
5589 
/* mfdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Indexed DCR read: the DCR number is taken from GPR[rA] at runtime. */
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5603 
/* mtdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Indexed DCR write: the DCR number is taken from GPR[rA] at runtime. */
    CHK_SV(ctx);
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5617 
/* dccci */
static void gen_dccci(DisasContext *ctx)
{
    /* Privileged cache-invalidate; QEMU does not model the data cache. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5624 
/* dcread */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /*
     * The loaded value is discarded; the load is presumably performed
     * only for its MMU side effects (translation/exceptions).  The
     * architected result written to rD is the effective address.
     */
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5642 
/* icbt */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5652 
/* iccci */
static void gen_iccci(DisasContext *ctx)
{
    /* Privileged icache-invalidate; QEMU does not model the icache. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5659 
/* icread */
static void gen_icread(DisasContext *ctx)
{
    /* Privileged icache read; QEMU does not model the icache. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5666 
/* rfci (supervisor only) */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    /* End the TB: the helper changed the execution state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5679 
/* rfci: return from critical interrupt (supervisor only) */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    /* End the TB: the helper changed the execution state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5691 
5692 /* BookE specific */
5693 
/* rfdi: return from debug interrupt (supervisor only) */
/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    /* End the TB: the helper changed the execution state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5706 
/* rfmci: return from machine-check interrupt (supervisor only) */
/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    /* End the TB: the helper changed the execution state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5719 
5720 /* TLB management - PowerPC 405 implementation */
5721 
5722 /* tlbre */
5723 static void gen_tlbre_40x(DisasContext *ctx)
5724 {
5725 #if defined(CONFIG_USER_ONLY)
5726     GEN_PRIV(ctx);
5727 #else
5728     CHK_SV(ctx);
5729     switch (rB(ctx->opcode)) {
5730     case 0:
5731         gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
5732                                 cpu_gpr[rA(ctx->opcode)]);
5733         break;
5734     case 1:
5735         gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
5736                                 cpu_gpr[rA(ctx->opcode)]);
5737         break;
5738     default:
5739         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5740         break;
5741     }
5742 #endif /* defined(CONFIG_USER_ONLY) */
5743 }
5744 
5745 /* tlbsx - tlbsx. */
5746 static void gen_tlbsx_40x(DisasContext *ctx)
5747 {
5748 #if defined(CONFIG_USER_ONLY)
5749     GEN_PRIV(ctx);
5750 #else
5751     TCGv t0;
5752 
5753     CHK_SV(ctx);
5754     t0 = tcg_temp_new();
5755     gen_addr_reg_index(ctx, t0);
5756     gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5757     if (Rc(ctx->opcode)) {
5758         TCGLabel *l1 = gen_new_label();
5759         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5760         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5761         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5762         gen_set_label(l1);
5763     }
5764 #endif /* defined(CONFIG_USER_ONLY) */
5765 }
5766 
5767 /* tlbwe */
5768 static void gen_tlbwe_40x(DisasContext *ctx)
5769 {
5770 #if defined(CONFIG_USER_ONLY)
5771     GEN_PRIV(ctx);
5772 #else
5773     CHK_SV(ctx);
5774 
5775     switch (rB(ctx->opcode)) {
5776     case 0:
5777         gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
5778                                 cpu_gpr[rS(ctx->opcode)]);
5779         break;
5780     case 1:
5781         gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
5782                                 cpu_gpr[rS(ctx->opcode)]);
5783         break;
5784     default:
5785         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5786         break;
5787     }
5788 #endif /* defined(CONFIG_USER_ONLY) */
5789 }
5790 
5791 /* TLB management - PowerPC 440 implementation */
5792 
5793 /* tlbre */
5794 static void gen_tlbre_440(DisasContext *ctx)
5795 {
5796 #if defined(CONFIG_USER_ONLY)
5797     GEN_PRIV(ctx);
5798 #else
5799     CHK_SV(ctx);
5800 
5801     switch (rB(ctx->opcode)) {
5802     case 0:
5803     case 1:
5804     case 2:
5805         {
5806             TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
5807             gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
5808                                  t0, cpu_gpr[rA(ctx->opcode)]);
5809         }
5810         break;
5811     default:
5812         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5813         break;
5814     }
5815 #endif /* defined(CONFIG_USER_ONLY) */
5816 }
5817 
5818 /* tlbsx - tlbsx. */
5819 static void gen_tlbsx_440(DisasContext *ctx)
5820 {
5821 #if defined(CONFIG_USER_ONLY)
5822     GEN_PRIV(ctx);
5823 #else
5824     TCGv t0;
5825 
5826     CHK_SV(ctx);
5827     t0 = tcg_temp_new();
5828     gen_addr_reg_index(ctx, t0);
5829     gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5830     if (Rc(ctx->opcode)) {
5831         TCGLabel *l1 = gen_new_label();
5832         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5833         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5834         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5835         gen_set_label(l1);
5836     }
5837 #endif /* defined(CONFIG_USER_ONLY) */
5838 }
5839 
5840 /* tlbwe */
5841 static void gen_tlbwe_440(DisasContext *ctx)
5842 {
5843 #if defined(CONFIG_USER_ONLY)
5844     GEN_PRIV(ctx);
5845 #else
5846     CHK_SV(ctx);
5847     switch (rB(ctx->opcode)) {
5848     case 0:
5849     case 1:
5850     case 2:
5851         {
5852             TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
5853             gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
5854                                  cpu_gpr[rS(ctx->opcode)]);
5855         }
5856         break;
5857     default:
5858         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5859         break;
5860     }
5861 #endif /* defined(CONFIG_USER_ONLY) */
5862 }
5863 
5864 /* TLB management - PowerPC BookE 2.06 implementation */
5865 
5866 /* tlbre */
5867 static void gen_tlbre_booke206(DisasContext *ctx)
5868 {
5869  #if defined(CONFIG_USER_ONLY)
5870     GEN_PRIV(ctx);
5871 #else
5872    CHK_SV(ctx);
5873     gen_helper_booke206_tlbre(cpu_env);
5874 #endif /* defined(CONFIG_USER_ONLY) */
5875 }
5876 
5877 /* tlbsx - tlbsx. */
5878 static void gen_tlbsx_booke206(DisasContext *ctx)
5879 {
5880 #if defined(CONFIG_USER_ONLY)
5881     GEN_PRIV(ctx);
5882 #else
5883     TCGv t0;
5884 
5885     CHK_SV(ctx);
5886     if (rA(ctx->opcode)) {
5887         t0 = tcg_temp_new();
5888         tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
5889     } else {
5890         t0 = cpu_gpr[rB(ctx->opcode)];
5891     }
5892     gen_helper_booke206_tlbsx(cpu_env, t0);
5893 #endif /* defined(CONFIG_USER_ONLY) */
5894 }
5895 
/* tlbwe */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* All operands are implicit (presumably the MAS SPRs); no GPRs used. */
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
5906 
/* tlbivax: invalidate TLB entries matching an effective address */
static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* Compute EA = (rA|0) + rB and hand it to the helper. */
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_booke206_tlbivax(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5920 
5921 static void gen_tlbilx_booke206(DisasContext *ctx)
5922 {
5923 #if defined(CONFIG_USER_ONLY)
5924     GEN_PRIV(ctx);
5925 #else
5926     TCGv t0;
5927 
5928     CHK_SV(ctx);
5929     t0 = tcg_temp_new();
5930     gen_addr_reg_index(ctx, t0);
5931 
5932     switch ((ctx->opcode >> 21) & 0x3) {
5933     case 0:
5934         gen_helper_booke206_tlbilx0(cpu_env, t0);
5935         break;
5936     case 1:
5937         gen_helper_booke206_tlbilx1(cpu_env, t0);
5938         break;
5939     case 3:
5940         gen_helper_booke206_tlbilx3(cpu_env, t0);
5941         break;
5942     default:
5943         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5944         break;
5945     }
5946 #endif /* defined(CONFIG_USER_ONLY) */
5947 }
5948 
5949 /* wrtee */
5950 static void gen_wrtee(DisasContext *ctx)
5951 {
5952 #if defined(CONFIG_USER_ONLY)
5953     GEN_PRIV(ctx);
5954 #else
5955     TCGv t0;
5956 
5957     CHK_SV(ctx);
5958     t0 = tcg_temp_new();
5959     tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
5960     tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
5961     tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
5962     gen_ppc_maybe_interrupt(ctx);
5963     /*
5964      * Stop translation to have a chance to raise an exception if we
5965      * just set msr_ee to 1
5966      */
5967     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
5968 #endif /* defined(CONFIG_USER_ONLY) */
5969 }
5970 
5971 /* wrteei */
5972 static void gen_wrteei(DisasContext *ctx)
5973 {
5974 #if defined(CONFIG_USER_ONLY)
5975     GEN_PRIV(ctx);
5976 #else
5977     CHK_SV(ctx);
5978     if (ctx->opcode & 0x00008000) {
5979         tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
5980         gen_ppc_maybe_interrupt(ctx);
5981         /* Stop translation to have a chance to raise an exception */
5982         ctx->base.is_jmp = DISAS_EXIT_UPDATE;
5983     } else {
5984         tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
5985     }
5986 #endif /* defined(CONFIG_USER_ONLY) */
5987 }
5988 
5989 /* PowerPC 440 specific instructions */
5990 
5991 /* dlmzb */
5992 static void gen_dlmzb(DisasContext *ctx)
5993 {
5994     TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
5995     gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
5996                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
5997 }
5998 
/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* NOTE(review): presumably no barrier is needed in QEMU's model */
    /* interpreted as no-op */
}
6004 
6005 /* msync replaces sync on 440 */
6006 static void gen_msync_4xx(DisasContext *ctx)
6007 {
6008     /* Only e500 seems to treat reserved bits as invalid */
6009     if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
6010         (ctx->opcode & 0x03FFF801)) {
6011         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
6012     }
6013     /* otherwise interpreted as no-op */
6014 }
6015 
/* icbt */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
6025 
6026 #if defined(TARGET_PPC64)
6027 static void gen_maddld(DisasContext *ctx)
6028 {
6029     TCGv_i64 t1 = tcg_temp_new_i64();
6030 
6031     tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
6032     tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
6033 }
6034 
/* maddhd maddhdu */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    /*
     * rD = high 64 bits of (rA * rB) + rC.  The Rc bit of the opcode
     * distinguishes the unsigned form (maddhdu) from the signed one.
     */
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    if (Rc(ctx->opcode)) {
        /* Unsigned 128-bit product; the addend rC is zero-extended */
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        /* Signed 128-bit product; t1 = sign extension of rC */
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /* 128-bit add (hi:lo) + (t1:rC); the high half lands in rD */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
6054 #endif /* defined(TARGET_PPC64) */
6055 
/* tbegin: raise facility-unavailable if TM is off; the helper makes the
   transaction fail unconditionally (QEMU never runs transactionally). */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(cpu_env);
}
6064 
/*
 * Transactional-memory user-space no-ops: check that the TM facility
 * is enabled, then record "no transaction" in CR0.
 */
#define GEN_TM_NOOP(name)                                      \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails in QEMU, these user         \
     * space instructions all have a simple implementation:    \
     *                                                         \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                       \
     *           = 0b0 || 0b00    || 0b0                       \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}
6081 
/* All user-mode TM instructions share the GEN_TM_NOOP implementation. */
GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6089 
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* cpabort: nothing to abort, since copy/paste below are unimplemented
       and raise an invalid-instruction exception. */
    /* Do Nothing */
}
6094 
/* copy/paste handlers: unimplemented, so they raise program-invalid. */
#define GEN_CP_PASTE_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)          \
{                                                         \
    /*                                                    \
     * Generate invalid exception until we have an        \
     * implementation of the copy paste facility          \
     */                                                   \
    gen_invalid(ctx);                                     \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6107 
/* tcheck: report transaction status into the CR field selected by crfD. */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6123 
#if defined(CONFIG_USER_ONLY)
/* Privileged TM instructions always trap in user-only mode. */
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    gen_priv_opc(ctx);                                         \
}

#else

/* Privileged TM no-ops: privilege + facility checks, then CR0 = 0. */
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    CHK_SV(ctx);                                               \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails, the implementation is      \
     * simple:                                                 \
     *                                                         \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                         \
     *         = 0b0 || 0b00 || 0b0                            \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6155 
/* Load FPR regno from the CPU state into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
}
6160 
/* Store src into FPR regno, zeroing the other VSR doubleword. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}
6173 
/* Load the high or low doubleword of Altivec register regno into dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
}
6178 
/* Store src into the high or low doubleword of Altivec register regno. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
}
6183 
/*
 * Helpers for decodetree used by !function for decoding arguments.
 */
static int times_2(DisasContext *ctx, int x)
{
    /* Scale an immediate field by 2 (ctx unused, required by decodetree). */
    return x * 2;
}

static int times_4(DisasContext *ctx, int x)
{
    /* Scale an immediate field by 4 (ctx unused, required by decodetree). */
    return x * 4;
}

static int times_16(DisasContext *ctx, int x)
{
    /* Scale an immediate field by 16 (ctx unused, required by decodetree). */
    return x * 16;
}

static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    /* Insert the 6-bit field x at bit 3 of the fixed base address. */
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}
6206 
/*
 * Helpers for trans_* functions to check for specific insns flags.
 * Use token pasting to ensure that we use the proper flag with the
 * proper variable.
 */
#define REQUIRE_INSNS_FLAGS(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
            return false;                               \
        }                                               \
    } while (0)

#define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
            return false;                               \
        }                                               \
    } while (0)

/* Then special-case the check for 64-bit so that we elide code for ppc32. */
#if TARGET_LONG_BITS == 32
# define REQUIRE_64BIT(CTX)  return false
#else
# define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif

/* Raise the AltiVec-unavailable exception when AltiVec is disabled. */
#define REQUIRE_VECTOR(CTX)                             \
    do {                                                \
        if (unlikely(!(CTX)->altivec_enabled)) {        \
            gen_exception((CTX), POWERPC_EXCP_VPU);     \
            return true;                                \
        }                                               \
    } while (0)

/* Raise the VSX-unavailable exception when VSX is disabled. */
#define REQUIRE_VSX(CTX)                                \
    do {                                                \
        if (unlikely(!(CTX)->vsx_enabled)) {            \
            gen_exception((CTX), POWERPC_EXCP_VSXU);    \
            return true;                                \
        }                                               \
    } while (0)

/* Raise the FP-unavailable exception when the FPU is disabled. */
#define REQUIRE_FPU(ctx)                                \
    do {                                                \
        if (unlikely(!(ctx)->fpu_enabled)) {            \
            gen_exception((ctx), POWERPC_EXCP_FPU);     \
            return true;                                \
        }                                               \
    } while (0)

#if !defined(CONFIG_USER_ONLY)
/* Privileged instruction: trap when executing in problem state. */
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

/* Hypervisor instruction: requires HV state and not problem state. */
#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
/* In user-only mode privileged instructions always trap. */
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
6277 
/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { return FUNC(ctx, a, __VA_ARGS__); }
/* As TRANS, but first require an insns_flags bit. */
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
/* As TRANS, but first require an insns_flags2 bit. */
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }

/* As TRANS, but only for 64-bit targets. */
#define TRANS64(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
/* As TRANS64, but also require an insns_flags2 bit. */
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_64BIT(ctx);                                    \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
6308 
6309 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6310 
6311 
6312 #include "decode-insn32.c.inc"
6313 #include "decode-insn64.c.inc"
6314 #include "power8-pmu-regs.c.inc"
6315 
6316 /*
6317  * Incorporate CIA into the constant when R=1.
6318  * Validate that when R=1, RA=0.
6319  */
6320 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
6321 {
6322     d->rt = a->rt;
6323     d->ra = a->ra;
6324     d->si = a->si;
6325     if (a->r) {
6326         if (unlikely(a->ra != 0)) {
6327             gen_invalid(ctx);
6328             return false;
6329         }
6330         d->si += ctx->cia;
6331     }
6332     return true;
6333 }
6334 
6335 #include "translate/fixedpoint-impl.c.inc"
6336 
6337 #include "translate/fp-impl.c.inc"
6338 
6339 #include "translate/vmx-impl.c.inc"
6340 
6341 #include "translate/vsx-impl.c.inc"
6342 
6343 #include "translate/dfp-impl.c.inc"
6344 
6345 #include "translate/spe-impl.c.inc"
6346 
6347 #include "translate/branch-impl.c.inc"
6348 
6349 #include "translate/processor-ctrl-impl.c.inc"
6350 
6351 #include "translate/storage-ctrl-impl.c.inc"
6352 
6353 /* Handles lfdp */
6354 static void gen_dform39(DisasContext *ctx)
6355 {
6356     if ((ctx->opcode & 0x3) == 0) {
6357         if (ctx->insns_flags2 & PPC2_ISA205) {
6358             return gen_lfdp(ctx);
6359         }
6360     }
6361     return gen_invalid(ctx);
6362 }
6363 
6364 /* Handles stfdp */
6365 static void gen_dform3D(DisasContext *ctx)
6366 {
6367     if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6368         /* stfdp */
6369         if (ctx->insns_flags2 & PPC2_ISA205) {
6370             return gen_stfdp(ctx);
6371         }
6372     }
6373     return gen_invalid(ctx);
6374 }
6375 
6376 #if defined(TARGET_PPC64)
/* brd: byte-reverse the doubleword in rS into rA */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
6382 
/* brw: byte-reverse each word of rS into rA */
static void gen_brw(DisasContext *ctx)
{
    /*
     * A full 64-bit byte swap also exchanges the two words, so rotate
     * by 32 to put the words back in their original positions.
     */
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}
6390 
/* brh: byte-reverse each halfword of rS into rA */
static void gen_brh(DisasContext *ctx)
{
    /* Swap the two bytes of every halfword by masking and shifting by 8. */
    TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    /* t2 = each halfword's high byte moved to the low position */
    tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
    tcg_gen_and_i64(t2, t1, mask);
    /* t1 = each halfword's low byte moved to the high position */
    tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_shli_i64(t1, t1, 8);
    tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
}
6404 #endif
6405 
static opcode_t opcodes[] = {
/*
 * Legacy (non-decodetree) opcode table.  Each entry supplies the handler
 * plus the opc1/opc2/opc3 index bytes used to build the dispatch tables
 * (0xFF meaning "no sub-index at this level" -- see register_insn), an
 * "invalid bits" mask that must be zero in the instruction word, and the
 * instruction-class flag(s) a CPU model must enable for the insn to exist.
 */
#if defined(TARGET_PPC64)
GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
#endif
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
#if defined(TARGET_PPC64)
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
#endif
/* handles lfdp, lxsd, lxssp */
GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
/* handles stfdp, stxsd, stxssp */
GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
#endif
GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
/* ISA v3.0 changed the extended opcode from 62 to 30 */
GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
#if !defined(CONFIG_USER_ONLY)
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
#endif
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
#if defined(TARGET_PPC64)
GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
             PPC_SEGMENT_64B),
GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
             PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
/*
 * XXX Those instructions will need to be handled differently for
 * different ISA versions
 */
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
              PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
               PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
             PPC_440_SPEC),
GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
              PPC2_ISA300),
GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
#endif

/* Integer arithmetic: add variants */
#undef GEN_INT_ARITH_ADD
#undef GEN_INT_ARITH_ADD_CONST
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)

/* Integer arithmetic: 32-bit divide/modulo variants */
#undef GEN_INT_ARITH_DIVW
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#if defined(TARGET_PPC64)
/* Integer arithmetic: 64-bit divide/modulo/multiply variants */
#undef GEN_INT_ARITH_DIVD
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),

GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#undef GEN_INT_ARITH_MUL_HELPER
#define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
#endif

/* Integer arithmetic: subtract-from variants */
#undef GEN_INT_ARITH_SUBF
#undef GEN_INT_ARITH_SUBF_CONST
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* Integer logical operations */
#undef GEN_LOGICAL1
#undef GEN_LOGICAL2
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
#endif

#if defined(TARGET_PPC64)
/* 64-bit rotate instructions (Rc and shift-bit encoding variants) */
#undef GEN_PPC64_R2
#undef GEN_PPC64_R4
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B)
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
             PPC_64B)
GEN_PPC64_R4(rldicl, 0x1E, 0x00),
GEN_PPC64_R4(rldicr, 0x1E, 0x02),
GEN_PPC64_R4(rldic, 0x1E, 0x04),
GEN_PPC64_R2(rldcl, 0x1E, 0x08),
GEN_PPC64_R2(rldcr, 0x1E, 0x09),
GEN_PPC64_R4(rldimi, 0x1E, 0x06),
#endif

/* Indexed loads (byte-reversed, caching-inhibited) */
#undef GEN_LDX_E
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),

#if defined(TARGET_PPC64)
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)

/* HV/P7 and later only */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)

/* External PID based load */
#undef GEN_LDEPX
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

/* Indexed stores (byte-reversed, caching-inhibited) */
#undef GEN_STX_E
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),

#if defined(TARGET_PPC64)
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)

/* External PID based store */
#undef GEN_STEPX
#define GEN_STEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif

/* Condition-register logical operations */
#undef GEN_CRLOGIC
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),

/* PowerPC 405 multiply-accumulate instructions */
#undef GEN_MAC_HANDLER
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),

/* Transactional Memory instructions */
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend,   "tend",   0x1F, 0x0E, 0x15, 0x01FFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
               PPC_NONE, PPC2_TM),

#include "translate/fp-ops.c.inc"

#include "translate/vmx-ops.c.inc"

#include "translate/vsx-ops.c.inc"

#include "translate/spe-ops.c.inc"
};
6902 
6903 /*****************************************************************************/
6904 /* Opcode types */
/*
 * Dispatch table entries are tagged in the low pointer bits: PPC_DIRECT
 * means the entry is a handler, PPC_INDIRECT means it is a (tagged)
 * pointer to a sub-table (see is_indirect_opcode() / ind_table()).
 */
enum {
    PPC_DIRECT   = 0, /* Opcode routine        */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

/* Bits of a table entry reserved for the direct/indirect tag. */
#define PPC_OPCODE_MASK 0x3
6911 
6912 static inline int is_indirect_opcode(void *handler)
6913 {
6914     return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
6915 }
6916 
6917 static inline opc_handler_t **ind_table(void *handler)
6918 {
6919     return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6920 }
6921 
6922 /* Instruction table creation */
6923 /* Opcodes tables creation */
6924 static void fill_new_table(opc_handler_t **table, int len)
6925 {
6926     int i;
6927 
6928     for (i = 0; i < len; i++) {
6929         table[i] = &invalid_handler;
6930     }
6931 }
6932 
6933 static int create_new_table(opc_handler_t **table, unsigned char idx)
6934 {
6935     opc_handler_t **tmp;
6936 
6937     tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
6938     fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
6939     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
6940 
6941     return 0;
6942 }
6943 
6944 static int insert_in_table(opc_handler_t **table, unsigned char idx,
6945                             opc_handler_t *handler)
6946 {
6947     if (table[idx] != &invalid_handler) {
6948         return -1;
6949     }
6950     table[idx] = handler;
6951 
6952     return 0;
6953 }
6954 
6955 static int register_direct_insn(opc_handler_t **ppc_opcodes,
6956                                 unsigned char idx, opc_handler_t *handler)
6957 {
6958     if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
6959         printf("*** ERROR: opcode %02x already assigned in main "
6960                "opcode table\n", idx);
6961         return -1;
6962     }
6963 
6964     return 0;
6965 }
6966 
6967 static int register_ind_in_table(opc_handler_t **table,
6968                                  unsigned char idx1, unsigned char idx2,
6969                                  opc_handler_t *handler)
6970 {
6971     if (table[idx1] == &invalid_handler) {
6972         if (create_new_table(table, idx1) < 0) {
6973             printf("*** ERROR: unable to create indirect table "
6974                    "idx=%02x\n", idx1);
6975             return -1;
6976         }
6977     } else {
6978         if (!is_indirect_opcode(table[idx1])) {
6979             printf("*** ERROR: idx %02x already assigned to a direct "
6980                    "opcode\n", idx1);
6981             return -1;
6982         }
6983     }
6984     if (handler != NULL &&
6985         insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
6986         printf("*** ERROR: opcode %02x already assigned in "
6987                "opcode table %02x\n", idx2, idx1);
6988         return -1;
6989     }
6990 
6991     return 0;
6992 }
6993 
6994 static int register_ind_insn(opc_handler_t **ppc_opcodes,
6995                              unsigned char idx1, unsigned char idx2,
6996                              opc_handler_t *handler)
6997 {
6998     return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
6999 }
7000 
7001 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
7002                                 unsigned char idx1, unsigned char idx2,
7003                                 unsigned char idx3, opc_handler_t *handler)
7004 {
7005     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7006         printf("*** ERROR: unable to join indirect table idx "
7007                "[%02x-%02x]\n", idx1, idx2);
7008         return -1;
7009     }
7010     if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7011                               handler) < 0) {
7012         printf("*** ERROR: unable to insert opcode "
7013                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7014         return -1;
7015     }
7016 
7017     return 0;
7018 }
7019 
/*
 * Register a handler indexed by four opcode levels (opc1/opc2/opc3/opc4),
 * creating the intermediate indirect sub-tables on demand.  Returns 0 on
 * success, -1 on any conflict along the way.
 */
static int register_trplind_insn(opc_handler_t **ppc_opcodes,
                                 unsigned char idx1, unsigned char idx2,
                                 unsigned char idx3, unsigned char idx4,
                                 opc_handler_t *handler)
{
    opc_handler_t **table;

    /* Ensure the level-1 slot holds an indirect table (no handler yet). */
    if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
        printf("*** ERROR: unable to join indirect table idx "
               "[%02x-%02x]\n", idx1, idx2);
        return -1;
    }
    table = ind_table(ppc_opcodes[idx1]);
    /* Likewise ensure the level-2 slot is an indirect table. */
    if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
        printf("*** ERROR: unable to join 2nd-level indirect table idx "
               "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
        return -1;
    }
    table = ind_table(table[idx2]);
    /* Finally install the handler at the third indirection level. */
    if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
        printf("*** ERROR: unable to insert opcode "
               "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
        return -1;
    }
    return 0;
}
7046 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7047 {
7048     if (insn->opc2 != 0xFF) {
7049         if (insn->opc3 != 0xFF) {
7050             if (insn->opc4 != 0xFF) {
7051                 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7052                                           insn->opc3, insn->opc4,
7053                                           &insn->handler) < 0) {
7054                     return -1;
7055                 }
7056             } else {
7057                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7058                                          insn->opc3, &insn->handler) < 0) {
7059                     return -1;
7060                 }
7061             }
7062         } else {
7063             if (register_ind_insn(ppc_opcodes, insn->opc1,
7064                                   insn->opc2, &insn->handler) < 0) {
7065                 return -1;
7066             }
7067         }
7068     } else {
7069         if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7070             return -1;
7071         }
7072     }
7073 
7074     return 0;
7075 }
7076 
7077 static int test_opcode_table(opc_handler_t **table, int len)
7078 {
7079     int i, count, tmp;
7080 
7081     for (i = 0, count = 0; i < len; i++) {
7082         /* Consistency fixup */
7083         if (table[i] == NULL) {
7084             table[i] = &invalid_handler;
7085         }
7086         if (table[i] != &invalid_handler) {
7087             if (is_indirect_opcode(table[i])) {
7088                 tmp = test_opcode_table(ind_table(table[i]),
7089                     PPC_CPU_INDIRECT_OPCODES_LEN);
7090                 if (tmp == 0) {
7091                     free(table[i]);
7092                     table[i] = &invalid_handler;
7093                 } else {
7094                     count++;
7095                 }
7096             } else {
7097                 count++;
7098             }
7099         }
7100     }
7101 
7102     return count;
7103 }
7104 
7105 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7106 {
7107     if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7108         printf("*** WARNING: no opcode defined !\n");
7109     }
7110 }
7111 
7112 /*****************************************************************************/
7113 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7114 {
7115     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7116     opcode_t *opc;
7117 
7118     fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7119     for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7120         if (((opc->handler.type & pcc->insns_flags) != 0) ||
7121             ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7122             if (register_insn(cpu->opcodes, opc) < 0) {
7123                 error_setg(errp, "ERROR initializing PowerPC instruction "
7124                            "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7125                            opc->opc3);
7126                 return;
7127             }
7128         }
7129     }
7130     fix_opcode_tables(cpu->opcodes);
7131     fflush(stdout);
7132     fflush(stderr);
7133 }
7134 
7135 void destroy_ppc_opcodes(PowerPCCPU *cpu)
7136 {
7137     opc_handler_t **table, **table_2;
7138     int i, j, k;
7139 
7140     for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
7141         if (cpu->opcodes[i] == &invalid_handler) {
7142             continue;
7143         }
7144         if (is_indirect_opcode(cpu->opcodes[i])) {
7145             table = ind_table(cpu->opcodes[i]);
7146             for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
7147                 if (table[j] == &invalid_handler) {
7148                     continue;
7149                 }
7150                 if (is_indirect_opcode(table[j])) {
7151                     table_2 = ind_table(table[j]);
7152                     for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
7153                         if (table_2[k] != &invalid_handler &&
7154                             is_indirect_opcode(table_2[k])) {
7155                             g_free((opc_handler_t *)((uintptr_t)table_2[k] &
7156                                                      ~PPC_INDIRECT));
7157                         }
7158                     }
7159                     g_free((opc_handler_t *)((uintptr_t)table[j] &
7160                                              ~PPC_INDIRECT));
7161                 }
7162             }
7163             g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
7164                 ~PPC_INDIRECT));
7165         }
7166     }
7167 }
7168 
7169 int ppc_fixup_cpu(PowerPCCPU *cpu)
7170 {
7171     CPUPPCState *env = &cpu->env;
7172 
7173     /*
7174      * TCG doesn't (yet) emulate some groups of instructions that are
7175      * implemented on some otherwise supported CPUs (e.g. VSX and
7176      * decimal floating point instructions on POWER7).  We remove
7177      * unsupported instruction groups from the cpu state's instruction
7178      * masks and hope the guest can cope.  For at least the pseries
7179      * machine, the unavailability of these instructions can be
7180      * advertised to the guest via the device tree.
7181      */
7182     if ((env->insns_flags & ~PPC_TCG_INSNS)
7183         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7184         warn_report("Disabling some instructions which are not "
7185                     "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7186                     env->insns_flags & ~PPC_TCG_INSNS,
7187                     env->insns_flags2 & ~PPC_TCG_INSNS2);
7188     }
7189     env->insns_flags &= PPC_TCG_INSNS;
7190     env->insns_flags2 &= PPC_TCG_INSNS2;
7191     return 0;
7192 }
7193 
7194 static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
7195 {
7196     opc_handler_t **table, *handler;
7197     uint32_t inval;
7198 
7199     ctx->opcode = insn;
7200 
7201     LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
7202               insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7203               ctx->le_mode ? "little" : "big");
7204 
7205     table = cpu->opcodes;
7206     handler = table[opc1(insn)];
7207     if (is_indirect_opcode(handler)) {
7208         table = ind_table(handler);
7209         handler = table[opc2(insn)];
7210         if (is_indirect_opcode(handler)) {
7211             table = ind_table(handler);
7212             handler = table[opc3(insn)];
7213             if (is_indirect_opcode(handler)) {
7214                 table = ind_table(handler);
7215                 handler = table[opc4(insn)];
7216             }
7217         }
7218     }
7219 
7220     /* Is opcode *REALLY* valid ? */
7221     if (unlikely(handler->handler == &gen_invalid)) {
7222         qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
7223                       "%02x - %02x - %02x - %02x (%08x) "
7224                       TARGET_FMT_lx "\n",
7225                       opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7226                       insn, ctx->cia);
7227         return false;
7228     }
7229 
7230     if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
7231                  && Rc(insn))) {
7232         inval = handler->inval2;
7233     } else {
7234         inval = handler->inval1;
7235     }
7236 
7237     if (unlikely((insn & inval) != 0)) {
7238         qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
7239                       "%02x - %02x - %02x - %02x (%08x) "
7240                       TARGET_FMT_lx "\n", insn & inval,
7241                       opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7242                       insn, ctx->cia);
7243         return false;
7244     }
7245 
7246     handler->handler(ctx);
7247     return true;
7248 }
7249 
/*
 * TranslatorOps hook: initialize the PowerPC DisasContext from the CPU
 * state and from the hflags that were baked into the TB's flags word
 * when the TB was created.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cs->env_ptr;
    /* tb->flags carries the HFLAGS_* bits unpacked below. */
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    /* Privilege and address-translation state. */
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    /* No memory access seen yet in this TB. */
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    /* Endianness of the guest code/data accesses. */
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    /* Lazy TLB flushing applies to the 32-bit hash MMU and 64-bit MMUs. */
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    /* Facility-availability bits. */
    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    /* PMU-related state (MMCR0 fields and instruction counting). */
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    /* Single-step: translate one instruction per TB. */
    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
7297 
/* TranslatorOps hook: no per-TB setup is needed for PowerPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7301 
/*
 * TranslatorOps hook: mark the start of a new guest instruction in the
 * TCG opcode stream, recording its guest PC.
 */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7306 
/*
 * Return true if @insn is the prefix word of a 64-bit prefixed
 * instruction (primary opcode 1).
 * NOTE(review): REQUIRE_INSNS_FLAGS2 is a macro (defined elsewhere in
 * this file) that presumably returns false from this function when the
 * CPU lacks ISA v3.1 support — confirm against its definition.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7312 
/*
 * TranslatorOps hook: fetch, decode and translate one instruction —
 * one 32-bit word, or two words for an ISA v3.1 prefixed instruction.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* cia = address of the instruction being translated. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* Try the decodetree decoder first, then the legacy tables. */
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Fetch the suffix word; the prefix fills the high 32 bits. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
7356 
7357 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7358 {
7359     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7360     DisasJumpType is_jmp = ctx->base.is_jmp;
7361     target_ulong nip = ctx->base.pc_next;
7362 
7363     if (is_jmp == DISAS_NORETURN) {
7364         /* We have already exited the TB. */
7365         return;
7366     }
7367 
7368     /* Honor single stepping. */
7369     if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
7370         && (nip <= 0x100 || nip > 0xf00)) {
7371         switch (is_jmp) {
7372         case DISAS_TOO_MANY:
7373         case DISAS_EXIT_UPDATE:
7374         case DISAS_CHAIN_UPDATE:
7375             gen_update_nip(ctx, nip);
7376             break;
7377         case DISAS_EXIT:
7378         case DISAS_CHAIN:
7379             break;
7380         default:
7381             g_assert_not_reached();
7382         }
7383 
7384         gen_debug_exception(ctx);
7385         return;
7386     }
7387 
7388     switch (is_jmp) {
7389     case DISAS_TOO_MANY:
7390         if (use_goto_tb(ctx, nip)) {
7391             pmu_count_insns(ctx);
7392             tcg_gen_goto_tb(0);
7393             gen_update_nip(ctx, nip);
7394             tcg_gen_exit_tb(ctx->base.tb, 0);
7395             break;
7396         }
7397         /* fall through */
7398     case DISAS_CHAIN_UPDATE:
7399         gen_update_nip(ctx, nip);
7400         /* fall through */
7401     case DISAS_CHAIN:
7402         /*
7403          * tcg_gen_lookup_and_goto_ptr will exit the TB if
7404          * CF_NO_GOTO_PTR is set. Count insns now.
7405          */
7406         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7407             pmu_count_insns(ctx);
7408         }
7409 
7410         tcg_gen_lookup_and_goto_ptr();
7411         break;
7412 
7413     case DISAS_EXIT_UPDATE:
7414         gen_update_nip(ctx, nip);
7415         /* fall through */
7416     case DISAS_EXIT:
7417         pmu_count_insns(ctx);
7418         tcg_gen_exit_tb(NULL, 0);
7419         break;
7420 
7421     default:
7422         g_assert_not_reached();
7423     }
7424 }
7425 
/*
 * TranslatorOps hook: write a disassembly of the translated guest code
 * to the given log file.
 */
static void ppc_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
7432 
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7441 
/*
 * Entry point for TB translation: run the generic translator loop with
 * the PowerPC hook table over a freshly stack-allocated DisasContext.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
7449