xref: /openbmc/qemu/target/ppc/translate.c (revision a11e3a15)
1 /*
2  *  PowerPC emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (C) 2011 Freescale Semiconductor, Inc.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "exec/translator.h"
34 #include "exec/log.h"
35 #include "qemu/atomic128.h"
36 #include "spr_common.h"
37 #include "power8-pmu.h"
38 
39 #include "qemu/qemu-print.h"
40 #include "qapi/error.h"
41 
42 #define HELPER_H "helper.h"
43 #include "exec/helper-info.c.inc"
44 #undef  HELPER_H
45 
/* Bit flags for DisasContext::singlestep_enabled (see gen_debug_exception) */
#define CPU_SINGLE_STEP 0x1
#define CPU_BRANCH_STEP 0x2
48 
49 /* Include definitions for instructions classes and implementations flags */
50 /* #define PPC_DEBUG_DISAS */
51 
52 #ifdef PPC_DEBUG_DISAS
53 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
54 #else
55 #  define LOG_DISAS(...) do { } while (0)
56 #endif
57 /*****************************************************************************/
58 /* Code translation helpers                                                  */
59 
/* global register indexes */
/*
 * Backing storage for the register names passed to tcg_global_mem_new*()
 * in ppc_translate_init(): "r0".."r31", "r0H".."r31H" and "crf0".."crf7",
 * each NUL-terminated.  TCG keeps pointers into this buffer, so it must
 * stay alive for the life of the process.
 */
static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5           /* CRF */];
static TCGv cpu_gpr[32];     /* general purpose registers */
static TCGv cpu_gprh[32];    /* SPE upper halves of the GPRs */
static TCGv_i32 cpu_crf[8];  /* condition register fields */
static TCGv cpu_nip;         /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
/* XER is kept split: the SO/OV/CA(+32) bits live in their own globals */
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;         /* lwarx/stwcx. reservation address */
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
static TCGv cpu_reserve_val2;
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
81 
/*
 * Register every PPC TCG global (GPRs, CR fields and special registers)
 * with the translator, binding each to its CPUPPCState field.  The
 * formatted names are written into the static cpu_reg_names[] buffer,
 * which the TCG globals reference for their whole lifetime.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;                    /* write cursor into cpu_reg_names[] */
    size_t cpu_reg_names_size;  /* bytes remaining at 'p' */

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields "crf0".."crf7": 5 bytes each incl. NUL */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs "r0".."r31" and SPE high halves "r0H".."r31H" */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        /* "r0".."r9" take 3 bytes with NUL, "r10".."r31" take 4 */
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER flag bits are modelled as separate globals; see spr_read_xer() */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(cpu_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
163 
164 /* internal defines */
/* Per-translation-block disassembly state for the PPC front end. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;   /* raw instruction word being translated */
    /* MSR-derived mode bits cached at TB start */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;  /* CPU tracks env->access_type; see gen_set_access_type */
    int mem_idx;            /* MMU index used for memory accesses */
    int access_type;        /* last value written to cpu_access_type, or -1 */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;   /* 64-bit mode; !sf_mode is NARROW_MODE */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    /* PMU-related MMCR0 bits cached for translation-time checks */
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;  /* CPU_SINGLE_STEP / CPU_BRANCH_STEP flags */
    uint32_t flags;          /* POWERPC_FLAG_* bits */
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
199 
200 #define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
201 #define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
202 #define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
203 #define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */
204 
205 /* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
    /*
     * A swap is needed whenever the guest's current endianness (LE when
     * le_mode is set) differs from the build-time target endianness.
     */
#if TARGET_BIG_ENDIAN
     return ctx->le_mode;
#else
     return !ctx->le_mode;
#endif
}
214 
215 /* True when active word size < size of target_long.  */
216 #ifdef TARGET_PPC64
217 # define NARROW_MODE(C)  (!(C)->sf_mode)
218 #else
219 # define NARROW_MODE(C)  0
220 #endif
221 
/* Dispatch-table entry describing one opcode: validity masks and handler. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
234 
235 static inline bool gen_serialize(DisasContext *ctx)
236 {
237     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
238         /* Restart with exclusive lock.  */
239         gen_helper_exit_atomic(cpu_env);
240         ctx->base.is_jmp = DISAS_NORETURN;
241         return false;
242     }
243     return true;
244 }
245 
246 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
247 static inline bool gen_serialize_core_lpar(DisasContext *ctx)
248 {
249     if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
250         return gen_serialize(ctx);
251     }
252 
253     return true;
254 }
255 #endif
256 
257 /* SPR load/store helpers */
/* Copy env->spr[reg] into TCG value t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value t into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}
267 
268 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
269 {
270     if (ctx->need_access_type && ctx->access_type != access_type) {
271         tcg_gen_movi_i32(cpu_access_type, access_type);
272         ctx->access_type = access_type;
273     }
274 }
275 
276 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
277 {
278     if (NARROW_MODE(ctx)) {
279         nip = (uint32_t)nip;
280     }
281     tcg_gen_movi_tl(cpu_nip, nip);
282 }
283 
284 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
285 {
286     TCGv_i32 t0, t1;
287 
288     /*
289      * These are all synchronous exceptions, we set the PC back to the
290      * faulting instruction
291      */
292     gen_update_nip(ctx, ctx->cia);
293     t0 = tcg_constant_i32(excp);
294     t1 = tcg_constant_i32(error);
295     gen_helper_raise_exception_err(cpu_env, t0, t1);
296     ctx->base.is_jmp = DISAS_NORETURN;
297 }
298 
299 static void gen_exception(DisasContext *ctx, uint32_t excp)
300 {
301     TCGv_i32 t0;
302 
303     /*
304      * These are all synchronous exceptions, we set the PC back to the
305      * faulting instruction
306      */
307     gen_update_nip(ctx, ctx->cia);
308     t0 = tcg_constant_i32(excp);
309     gen_helper_raise_exception(cpu_env, t0);
310     ctx->base.is_jmp = DISAS_NORETURN;
311 }
312 
313 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
314                               target_ulong nip)
315 {
316     TCGv_i32 t0;
317 
318     gen_update_nip(ctx, nip);
319     t0 = tcg_constant_i32(excp);
320     gen_helper_raise_exception(cpu_env, t0);
321     ctx->base.is_jmp = DISAS_NORETURN;
322 }
323 
324 #if !defined(CONFIG_USER_ONLY)
/* Re-evaluate pending interrupts; marks the TB as touching I/O state first. */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(cpu_env);
}
330 #endif
331 
332 /*
333  * Tells the caller what is the appropriate exception to generate and prepares
334  * SPR registers for this exception.
335  *
336  * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
337  * POWERPC_EXCP_DEBUG (on BookE).
338  */
339 static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
340 {
341 #if !defined(CONFIG_USER_ONLY)
342     if (ctx->flags & POWERPC_FLAG_DE) {
343         target_ulong dbsr = 0;
344         if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
345             dbsr = DBCR0_ICMP;
346         } else {
347             /* Must have been branch */
348             dbsr = DBCR0_BRT;
349         }
350         TCGv t0 = tcg_temp_new();
351         gen_load_spr(t0, SPR_BOOKE_DBSR);
352         tcg_gen_ori_tl(t0, t0, dbsr);
353         gen_store_spr(SPR_BOOKE_DBSR, t0);
354         gen_helper_raise_exception(cpu_env,
355                                    tcg_constant_i32(POWERPC_EXCP_DEBUG));
356         ctx->base.is_jmp = DISAS_NORETURN;
357     } else {
358         if (!rfi_type) { /* BookS does not single step rfi type instructions */
359             TCGv t0 = tcg_temp_new();
360             tcg_gen_movi_tl(t0, ctx->cia);
361             gen_helper_book3s_trace(cpu_env, t0);
362             ctx->base.is_jmp = DISAS_NORETURN;
363         }
364     }
365 #endif
366 }
367 
/* Raise an invalid-instruction error (HV_EMU is converted to a program
 * check later if needed). */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privileged-instruction program check directly. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Raise a privilege error via HV_EMU (converted to program check if needed). */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
384 
385 /*****************************************************************************/
386 /* SPR READ/WRITE CALLBACKS */
387 
/* Callback for SPRs with no access at all: intentionally a no-op.  The
 * disabled code below can be enabled to trace such accesses. */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}
395 
396 /* #define PPC_DUMP_SPR_ACCESSES */
397 
398 /*
399  * Generic callbacks:
400  * do nothing but store/retrieve spr value
401  */
/* Optionally trace an SPR read; compiled out unless PPC_DUMP_SPR_ACCESSES. */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
#endif
}
409 
/* Generic SPR read: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
415 
/* Optionally trace an SPR write; compiled out unless PPC_DUMP_SPR_ACCESSES. */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
#endif
}
423 
/* Generic SPR write: store the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
429 
/*
 * SPR write that keeps only the low 32 bits of the source GPR.  On
 * 32-bit targets this degenerates to the generic write.
 */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}
441 
442 void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
443 {
444     if (!(ctx->flags & POWERPC_FLAG_SMT)) {
445         spr_write_generic(ctx, sprn, gprn);
446         return;
447     }
448 
449     if (!gen_serialize(ctx)) {
450         return;
451     }
452 
453     gen_helper_spr_core_write_generic(cpu_env, tcg_constant_i32(sprn),
454                                       cpu_gpr[gprn]);
455     spr_store_dump_spr(sprn);
456 }
457 
458 static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
459 {
460     /* This does not implement >1 thread */
461     TCGv t0 = tcg_temp_new();
462     TCGv t1 = tcg_temp_new();
463     tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
464     tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
465     tcg_gen_or_tl(t1, t1, t0);
466     gen_store_spr(sprn, t1);
467 }
468 
469 void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
470 {
471     if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
472         /* CTRL behaves as 1-thread in LPAR-per-thread mode */
473         spr_write_CTRL_ST(ctx, sprn, gprn);
474         goto out;
475     }
476 
477     if (!gen_serialize(ctx)) {
478         return;
479     }
480 
481     gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
482                               cpu_gpr[gprn]);
483 out:
484     spr_store_dump_spr(sprn);
485 
486     /*
487      * SPR_CTRL writes must force a new translation block,
488      * allowing the PMU to calculate the run latch events with
489      * more accuracy.
490      */
491     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
492 }
493 
494 #if !defined(CONFIG_USER_ONLY)
/*
 * "Write-1-to-clear" style SPR write: bits set in the source GPR are
 * meant to be cleared from the SPR.
 *
 * NOTE(review): this uses tcg_gen_neg_tl (arithmetic two's-complement
 * negate) to build the mask, not bitwise NOT — for a value x with bit k
 * set, -x differs from ~x (e.g. it keeps bit k itself set).  If
 * write-1-to-clear semantics are intended, tcg_gen_not_tl or
 * tcg_gen_andc_tl would be expected here; confirm against the SPRs
 * registered with this callback before changing guest-visible behavior.
 */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
}
504 
/* Callback for SPRs whose reads/writes are deliberately ignored. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}
508 
509 #endif
510 
511 /* SPR common to all PowerPC */
512 /* XER */
513 void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
514 {
515     TCGv dst = cpu_gpr[gprn];
516     TCGv t0 = tcg_temp_new();
517     TCGv t1 = tcg_temp_new();
518     TCGv t2 = tcg_temp_new();
519     tcg_gen_mov_tl(dst, cpu_xer);
520     tcg_gen_shli_tl(t0, cpu_so, XER_SO);
521     tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
522     tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
523     tcg_gen_or_tl(t0, t0, t1);
524     tcg_gen_or_tl(dst, dst, t2);
525     tcg_gen_or_tl(dst, dst, t0);
526     if (is_isa300(ctx)) {
527         tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
528         tcg_gen_or_tl(dst, dst, t0);
529         tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
530         tcg_gen_or_tl(dst, dst, t0);
531     }
532 }
533 
534 void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
535 {
536     TCGv src = cpu_gpr[gprn];
537     /* Write all flags, while reading back check for isa300 */
538     tcg_gen_andi_tl(cpu_xer, src,
539                     ~((1u << XER_SO) |
540                       (1u << XER_OV) | (1u << XER_OV32) |
541                       (1u << XER_CA) | (1u << XER_CA32)));
542     tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
543     tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
544     tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
545     tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
546     tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
547 }
548 
549 /* LR */
/* LR read: copy the link register into the destination GPR. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

/* LR write: copy the source GPR into the link register. */
void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}
559 
560 /* CFAR */
561 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* CFAR read: copy the come-from address register into the GPR. */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

/* CFAR write: copy the source GPR into CFAR. */
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
571 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
572 
573 /* CTR */
/* CTR read: copy the count register into the destination GPR. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

/* CTR write: copy the source GPR into the count register. */
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}
583 
584 /* User read access to SPR */
585 /* USPRx */
586 /* UMMCRx */
587 /* UPMCx */
588 /* USIA */
589 /* UDECR */
/* User-mode alias read: user SPR number + 0x10 is the privileged SPR. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}
594 
595 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* User-mode alias write: stores to the privileged SPR (user sprn + 0x10). */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
600 #endif
601 
602 /* SPR common to all non-embedded PowerPC */
603 /* DECR */
604 #if !defined(CONFIG_USER_ONLY)
/* DECR read: decrementer is clock-driven, so mark the TB as doing I/O. */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

/* DECR write: may rearm the decrementer timer. */
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
616 #endif
617 
618 /* SPR common to all non-embedded PowerPC, except 601 */
619 /* Time base */
/* Time base low read; TB is clock-driven, so mark the TB as doing I/O. */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

/* Time base high read. */
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

/* Alternate time base low read (helper only). */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

/* Alternate time base high read (helper only). */
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}
641 
642 #if !defined(CONFIG_USER_ONLY)
/* Time base low write; timekeeping is I/O-like, so flag the TB. */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

/* Time base high write. */
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

/* Alternate time base low write (helper only). */
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

/* Alternate time base high write (helper only). */
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}
664 
665 #if defined(TARGET_PPC64)
/* PURR read: processor utilization register is clock-driven (I/O). */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

/* PURR write. */
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
/* Hypervisor decrementer read; clock-driven, so flag the TB as I/O. */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

/* Hypervisor decrementer write; may rearm the HDEC timer. */
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

/* Virtual time base read. */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

/* Virtual time base write. */
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

/* TBU40 write: updates the upper 40 bits of the time base. */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}
708 
709 #endif
710 #endif
711 
712 #if !defined(CONFIG_USER_ONLY)
713 /* IBAT0U...IBAT0U */
714 /* IBAT0L...IBAT7L */
/*
 * IBAT0..3 read: sprn parity selects the word (even = U, odd = L),
 * and (sprn - SPR_IBAT0U) / 2 selects the BAT pair.
 */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

/* IBAT4..7 read: same layout, offset by 4 pairs from SPR_IBAT4U. */
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

/* IBAT0..3 upper-word write; goes through a helper to update MMU state. */
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* IBAT4..7 upper-word write. */
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* IBAT0..3 lower-word write. */
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* IBAT4..7 lower-word write. */
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
}
752 
753 /* DBAT0U...DBAT7U */
754 /* DBAT0L...DBAT7L */
/*
 * DBAT0..3 read: sprn parity selects the word (even = U, odd = L),
 * and (sprn - SPR_DBAT0U) / 2 selects the BAT pair.
 */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

/* DBAT4..7 read: same layout, offset by 4 pairs from SPR_DBAT4U. */
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

/* DBAT0..3 upper-word write; goes through a helper to update MMU state. */
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* DBAT4..7 upper-word write. */
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
}

/* DBAT0..3 lower-word write. */
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}

/* DBAT4..7 lower-word write. */
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
}
792 
793 /* SDR1 */
/* SDR1 write: handled by a helper (updates the hash-table base state). */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}
798 
799 #if defined(TARGET_PPC64)
800 /* 64 bits PowerPC specific SPRs */
801 /* PIDR */
/* PIDR write: handled by a helper. */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

/* LPIDR write: handled by a helper. */
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}
811 
/* HIOR read: returns the exception vector prefix. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

/* HIOR write: masks the value before storing it as the exception prefix. */
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
}
/* PTCR write: handled by a helper. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

/* PCR write: handled by a helper. */
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}
832 
833 /* DPDES */
/*
 * DPDES read: doorbell state is shared across the core, so the access
 * is serialized when all threads share one LPAR.
 */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

/* DPDES write: same serialization requirement as the read. */
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
851 #endif
852 #endif
853 
854 /* PowerPC 40x specific registers */
855 #if !defined(CONFIG_USER_ONLY)
/* 40x PIT read: timer-backed, so flag the TB as doing I/O. */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

/* 40x PIT write: may rearm the interval timer. */
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

/* 40x DBCR0 write: stores the SPR and lets the helper act on it. */
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

/* 40x SLER write: handled by a helper. */
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

/* 40x TCR write: timer control, may rearm timers. */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

/* 40x TSR write: timer status, handled by a helper. */
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

/* 40x PID write: only the low 8 bits are implemented. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
}

/* BookE TCR write: timer control, may rearm timers. */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

/* BookE TSR write: timer status, handled by a helper. */
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
913 #endif
914 
915 /* PIR */
916 #if !defined(CONFIG_USER_ONLY)
/* PIR write: only the low 4 bits are kept. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
923 #endif
924 
925 /* SPE specific registers */
/* SPEFSCR read: the field is 32-bit in env, so load and zero-extend. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

/* SPEFSCR write: truncate the GPR to 32 bits and store it. */
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
}
939 
940 #if !defined(CONFIG_USER_ONLY)
941 /* Callback used to write the exception vector base */
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    /* Apply the per-CPU IVPR mask, then update both env and the SPR. */
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}
950 
/*
 * Write a BookE interrupt vector offset register (IVOR).  Maps the SPR
 * number to a vector index (IVOR0-15, IVOR32-37, IVOR38-42), masks the
 * value with ivor_mask and updates both env and the SPR; unknown SPRs
 * raise an invalid-instruction error.
 */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
974 #endif
975 
976 #ifdef TARGET_PPC64
977 #ifndef CONFIG_USER_ONLY
978 void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
979 {
980     TCGv t0 = tcg_temp_new();
981     TCGv t1 = tcg_temp_new();
982     TCGv t2 = tcg_temp_new();
983 
984     /*
985      * Note, the HV=1 PR=0 case is handled earlier by simply using
986      * spr_write_generic for HV mode in the SPR table
987      */
988 
989     /* Build insertion mask into t1 based on context */
990     if (ctx->pr) {
991         gen_load_spr(t1, SPR_UAMOR);
992     } else {
993         gen_load_spr(t1, SPR_AMOR);
994     }
995 
996     /* Mask new bits into t2 */
997     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
998 
999     /* Load AMR and clear new bits in t0 */
1000     gen_load_spr(t0, SPR_AMR);
1001     tcg_gen_andc_tl(t0, t0, t1);
1002 
1003     /* Or'in new bits and write it out */
1004     tcg_gen_or_tl(t0, t0, t2);
1005     gen_store_spr(SPR_AMR, t0);
1006     spr_store_dump_spr(SPR_AMR);
1007 }
1008 
1009 void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
1010 {
1011     TCGv t0 = tcg_temp_new();
1012     TCGv t1 = tcg_temp_new();
1013     TCGv t2 = tcg_temp_new();
1014 
1015     /*
1016      * Note, the HV=1 case is handled earlier by simply using
1017      * spr_write_generic for HV mode in the SPR table
1018      */
1019 
1020     /* Build insertion mask into t1 based on context */
1021     gen_load_spr(t1, SPR_AMOR);
1022 
1023     /* Mask new bits into t2 */
1024     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
1025 
1026     /* Load AMR and clear new bits in t0 */
1027     gen_load_spr(t0, SPR_UAMOR);
1028     tcg_gen_andc_tl(t0, t0, t1);
1029 
1030     /* Or'in new bits and write it out */
1031     tcg_gen_or_tl(t0, t0, t2);
1032     gen_store_spr(SPR_UAMOR, t0);
1033     spr_store_dump_spr(SPR_UAMOR);
1034 }
1035 
1036 void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
1037 {
1038     TCGv t0 = tcg_temp_new();
1039     TCGv t1 = tcg_temp_new();
1040     TCGv t2 = tcg_temp_new();
1041 
1042     /*
1043      * Note, the HV=1 case is handled earlier by simply using
1044      * spr_write_generic for HV mode in the SPR table
1045      */
1046 
1047     /* Build insertion mask into t1 based on context */
1048     gen_load_spr(t1, SPR_AMOR);
1049 
1050     /* Mask new bits into t2 */
1051     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
1052 
1053     /* Load AMR and clear new bits in t0 */
1054     gen_load_spr(t0, SPR_IAMR);
1055     tcg_gen_andc_tl(t0, t0, t1);
1056 
1057     /* Or'in new bits and write it out */
1058     tcg_gen_or_tl(t0, t0, t2);
1059     gen_store_spr(SPR_IAMR, t0);
1060     spr_store_dump_spr(SPR_IAMR);
1061 }
1062 #endif
1063 #endif
1064 
1065 #ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    /* Let the helper refresh THRM state before the SPR value is loaded. */
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1072 #endif /* !CONFIG_USER_ONLY */
1073 
1074 #if !defined(CONFIG_USER_ONLY)
1075 void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
1076 {
1077     TCGv t0 = tcg_temp_new();
1078 
1079     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
1080     gen_store_spr(sprn, t0);
1081 }
1082 
1083 void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
1084 {
1085     TCGv t0 = tcg_temp_new();
1086 
1087     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
1088     gen_store_spr(sprn, t0);
1089 }
1090 
1091 void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
1092 {
1093     TCGv t0 = tcg_temp_new();
1094 
1095     tcg_gen_andi_tl(t0, cpu_gpr[gprn],
1096                     ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
1097     gen_store_spr(sprn, t0);
1098 }
1099 
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    /* MMUCSR0 writes are handled entirely by the TLB-flush helper. */
    gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
}
1104 
1105 void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
1106 {
1107     TCGv_i32 t0 = tcg_constant_i32(sprn);
1108     gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
1109 }
1110 
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    /* EPLC writes are delegated to a helper. */
    gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}
1115 
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    /* EPSC writes are delegated to a helper. */
    gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
}
1120 
1121 #endif
1122 
1123 #if !defined(CONFIG_USER_ONLY)
1124 void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
1125 {
1126     TCGv val = tcg_temp_new();
1127     tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
1128     gen_store_spr(SPR_BOOKE_MAS3, val);
1129     tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
1130     gen_store_spr(SPR_BOOKE_MAS7, val);
1131 }
1132 
1133 void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
1134 {
1135     TCGv mas7 = tcg_temp_new();
1136     TCGv mas3 = tcg_temp_new();
1137     gen_load_spr(mas7, SPR_BOOKE_MAS7);
1138     tcg_gen_shli_tl(mas7, mas7, 32);
1139     gen_load_spr(mas3, SPR_BOOKE_MAS3);
1140     tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
1141 }
1142 
1143 #endif
1144 
1145 #ifdef TARGET_PPC64
1146 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1147                                     int bit, int sprn, int cause)
1148 {
1149     TCGv_i32 t1 = tcg_constant_i32(bit);
1150     TCGv_i32 t2 = tcg_constant_i32(sprn);
1151     TCGv_i32 t3 = tcg_constant_i32(cause);
1152 
1153     gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
1154 }
1155 
1156 static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
1157                                    int bit, int sprn, int cause)
1158 {
1159     TCGv_i32 t1 = tcg_constant_i32(bit);
1160     TCGv_i32 t2 = tcg_constant_i32(sprn);
1161     TCGv_i32 t3 = tcg_constant_i32(cause);
1162 
1163     gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
1164 }
1165 
1166 void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
1167 {
1168     TCGv spr_up = tcg_temp_new();
1169     TCGv spr = tcg_temp_new();
1170 
1171     gen_load_spr(spr, sprn - 1);
1172     tcg_gen_shri_tl(spr_up, spr, 32);
1173     tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
1174 }
1175 
1176 void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
1177 {
1178     TCGv spr = tcg_temp_new();
1179 
1180     gen_load_spr(spr, sprn - 1);
1181     tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
1182     gen_store_spr(sprn - 1, spr);
1183 }
1184 
1185 #if !defined(CONFIG_USER_ONLY)
1186 void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
1187 {
1188     TCGv hmer = tcg_temp_new();
1189 
1190     gen_load_spr(hmer, sprn);
1191     tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
1192     gen_store_spr(sprn, hmer);
1193     spr_store_dump_spr(sprn);
1194 }
1195 
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
    /* TFMR reads go through a helper. */
    gen_helper_load_tfmr(cpu_gpr[gprn], cpu_env);
}
1200 
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
    /* TFMR writes go through a helper. */
    gen_helper_store_tfmr(cpu_env, cpu_gpr[gprn]);
}
1205 
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    /*
     * NOTE(review): translator_io_start() is called before the helper,
     * presumably because LPCR updates can affect globally visible state
     * (e.g. interrupt delivery) — keep this ordering.
     */
    translator_io_start(&ctx->base);
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
1211 #endif /* !defined(CONFIG_USER_ONLY) */
1212 
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    /* TAR access is gated by FSCR[TAR]. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}
1218 
void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    /* TAR access is gated by FSCR[TAR]. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}
1224 
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    /* TM SPR access is gated by MSR[TM]. */
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}
1230 
void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    /* TM SPR access is gated by MSR[TM]. */
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}
1236 
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    /* Gated by MSR[TM]; reads the upper half of the SPR at sprn - 1. */
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1242 
void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    /* Gated by MSR[TM]; writes the upper half of the SPR at sprn - 1. */
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1248 
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    /* EBB SPR access is gated by FSCR[EBB]. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}
1254 
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    /* EBB SPR access is gated by FSCR[EBB]. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}
1260 
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    /* Gated by FSCR[EBB]; reads the upper half of the SPR at sprn - 1. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}
1266 
void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    /* Gated by FSCR[EBB]; writes the upper half of the SPR at sprn - 1. */
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1272 
1273 void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
1274 {
1275     TCGv t0 = tcg_temp_new();
1276 
1277     /*
1278      * Access to the (H)DEXCR in problem state is done using separated
1279      * SPR indexes which are 16 below the SPR indexes which have full
1280      * access to the (H)DEXCR in privileged state. Problem state can
1281      * only read bits 32:63, bits 0:31 return 0.
1282      *
1283      * See section 9.3.1-9.3.2 of PowerISA v3.1B
1284      */
1285 
1286     gen_load_spr(t0, sprn + 16);
1287     tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
1288 }
1289 #endif
1290 
/* Opcode-table entry: primary/extended opcode, no type2 qualifier. */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

/* As GEN_HANDLER, with an explicit type2 (feature) qualifier. */
#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

/* As GEN_HANDLER, with a separate display name 'onam'. */
#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

/* Display name plus type2 qualifier. */
#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

/* Four-level opcode (opc4) variant. */
#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

/* Four-level opcode variant with a separate display name. */
#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1308 
/* One entry of the PowerPC opcode table. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;  /* up to four opcode levels */
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;  /* invalid-bit masks, type flags, callback */
    const char *oname;      /* opcode name, used for diagnostics */
} opcode_t;
1317 
/* Raise a privileged-opcode program exception. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1322 
/* Helpers for priv. check */
/* Raise a privileged-opcode exception and abort translation of this insn. */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: every privileged access is rejected. */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Require hypervisor state (HV=1, PR=0). */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Require supervisor state (PR=0). */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Require hypervisor real mode (HV=1, PR=0, DR=0). */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check required. */
#define CHK_NONE(CTX)
1355 
1356 /*****************************************************************************/
1357 /* PowerPC instructions table                                                */
1358 
/* Three-level opcode entry; opc4 = 0xff marks "no fourth level". */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, with two invalid-bit masks for dual-form instructions. */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, with an explicit display name 'onam'. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
/* Four-level opcode entry (opc4 used). */
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Four-level opcode entry with an explicit display name. */
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1430 
/* Invalid instruction */
/* Default handler: raise an invalid-instruction program exception. */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
1436 
/* Table entry used for every opcode slot with no real implementation. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1444 
1445 /***                           Integer comparison                          ***/
1446 
1447 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
1448 {
1449     TCGv t0 = tcg_temp_new();
1450     TCGv t1 = tcg_temp_new();
1451     TCGv_i32 t = tcg_temp_new_i32();
1452 
1453     tcg_gen_movi_tl(t0, CRF_EQ);
1454     tcg_gen_movi_tl(t1, CRF_LT);
1455     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
1456                        t0, arg0, arg1, t1, t0);
1457     tcg_gen_movi_tl(t1, CRF_GT);
1458     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
1459                        t0, arg0, arg1, t1, t0);
1460 
1461     tcg_gen_trunc_tl_i32(t, t0);
1462     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
1463     tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
1464 }
1465 
1466 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
1467 {
1468     TCGv t0 = tcg_constant_tl(arg1);
1469     gen_op_cmp(arg0, t0, s, crf);
1470 }
1471 
1472 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
1473 {
1474     TCGv t0, t1;
1475     t0 = tcg_temp_new();
1476     t1 = tcg_temp_new();
1477     if (s) {
1478         tcg_gen_ext32s_tl(t0, arg0);
1479         tcg_gen_ext32s_tl(t1, arg1);
1480     } else {
1481         tcg_gen_ext32u_tl(t0, arg0);
1482         tcg_gen_ext32u_tl(t1, arg1);
1483     }
1484     gen_op_cmp(t0, t1, s, crf);
1485 }
1486 
1487 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
1488 {
1489     TCGv t0 = tcg_constant_tl(arg1);
1490     gen_op_cmp32(arg0, t0, s, crf);
1491 }
1492 
1493 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
1494 {
1495     if (NARROW_MODE(ctx)) {
1496         gen_op_cmpi32(reg, 0, 1, 0);
1497     } else {
1498         gen_op_cmpi(reg, 0, 1, 0);
1499     }
1500 }
1501 
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 = low byte of rA; src2's low two bytes give the first range. */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* in_range = (src2lo <= src1) && (src1 <= src2hi) */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    /* L bit set: also test the second range held in src2's next two bytes. */
    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* Result (0 or 1) lands in the GT bit of the CR field. */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}
1535 
1536 #if defined(TARGET_PPC64)
/* cmpeqb */
/* Byte-equality scan, implemented entirely in a helper. */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
1543 #endif
1544 
1545 /* isel (PowerPC 2.03 specification) */
1546 static void gen_isel(DisasContext *ctx)
1547 {
1548     uint32_t bi = rC(ctx->opcode);
1549     uint32_t mask = 0x08 >> (bi & 0x03);
1550     TCGv t0 = tcg_temp_new();
1551     TCGv zr;
1552 
1553     tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
1554     tcg_gen_andi_tl(t0, t0, mask);
1555 
1556     zr = tcg_constant_tl(0);
1557     tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
1558                        rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
1559                        cpu_gpr[rB(ctx->opcode)]);
1560 }
1561 
/* cmpb: PowerPC 2.05 specification */
/* Per-byte comparison of rS and rB, implemented in a helper. */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1568 
1569 /***                           Integer arithmetic                          ***/
1570 
1571 static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
1572                                            TCGv arg1, TCGv arg2, int sub)
1573 {
1574     TCGv t0 = tcg_temp_new();
1575 
1576     tcg_gen_xor_tl(cpu_ov, arg0, arg2);
1577     tcg_gen_xor_tl(t0, arg1, arg2);
1578     if (sub) {
1579         tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
1580     } else {
1581         tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
1582     }
1583     if (NARROW_MODE(ctx)) {
1584         tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
1585         if (is_isa300(ctx)) {
1586             tcg_gen_mov_tl(cpu_ov32, cpu_ov);
1587         }
1588     } else {
1589         if (is_isa300(ctx)) {
1590             tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
1591         }
1592         tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
1593     }
1594     tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
1595 }
1596 
/*
 * Compute CA32 (carry out of bit 32) for ISA v3.0; a no-op on older
 * ISAs. 'res' is the result of adding/subtracting arg0 and arg1.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    /* carry-out(bit 32) = bit 32 of (op0 ^ op1 ^ res), inverted for sub. */
    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1616 
/* Common add function */
/*
 * Generate ret = arg1 + arg2 (+ ca if add_ca), optionally computing
 * CA/CA32, OV/OV32 and CR0. 'ca'/'ca32' are the carry registers to use
 * (cpu_ca/cpu_ca32, or cpu_ov for addex). A scratch temp is used when
 * flags are computed so that ret may alias an input operand.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            /* Full-width add with carry-out via double-word add2. */
            TCGv zero = tcg_constant_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy the scratch result back if one was used. */
    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Add functions with two operands */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
1695 
/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo.  */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex */
/* Note: addex uses OV as its carry-in/out instead of CA. */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
1713 /* addic  addic.*/
1714 static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
1715 {
1716     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
1717     gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1718                      c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
1719 }
1720 
/* addic: no CR0 update. */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}
1725 
/* addic.: same, but also sets CR0. */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1730 
/*
 * 32-bit divide: ret = arg1 / arg2 (signed if 'sign'), optionally
 * setting OV/OV32 on the invalid cases (divide by zero, and
 * INT_MIN / -1 for the signed form) and CR0 when Rc is set.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = 1 on the invalid cases: INT_MIN / -1, or division by 0. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* On an invalid case, substitute divisor 1 so the div is defined. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* Unsigned: only division by zero is invalid. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        /* OV is exactly the invalid-case flag computed above. */
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu  divwu.  divwuo  divwuo.   */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo.   */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1784 
/* div[wd]eu[o][.] */
/* Extended-divide forms, implemented by helpers; 'compute_ov' selects
 * whether the helper updates the overflow flags. */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1801 
1802 #if defined(TARGET_PPC64)
/*
 * Common 64-bit divide: ret = arg1 / arg2, signed or unsigned.
 *
 * Division by zero -- and, for the signed case, INT64_MIN / -1 -- would
 * trap in a host division, so t2 flags those inputs and the divisor is
 * then replaced by t2 itself (which equals 1 in exactly those cases);
 * the architecture leaves the quotient undefined there.  t2 is also the
 * value written to XER[OV/OV32] when compute_ov is set.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Copy the operands so that ret may alias arg1 or arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (arg1 == INT64_MIN && arg2 == -1) || (arg2 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* If t2 != 0, divide by t2 (== 1) instead of the trapping value. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = (arg2 == 0); if so, divide by 1 instead. */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1840 
/* 64-bit counterpart of GEN_INT_ARITH_DIVW; opc3 is documentation only. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo.   */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo.   */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

/* divdeu[o][.] / divde[o][.] -- extended divide via helpers */
GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1859 #endif
1860 
/*
 * Common 32-bit modulo: ret = arg1 % arg2, signed or unsigned.
 *
 * Like the divide helpers, the trapping inputs (divisor 0, and for the
 * signed case INT_MIN % -1) are detected and the divisor replaced with
 * a harmless value before the host rem is emitted; the ISA leaves the
 * result undefined for those inputs.  No OV or Rc handling here --
 * modulo instructions have neither.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = (arg1 == INT_MIN && arg2 == -1) || (arg2 == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* If flagged, use t2 (== 1) as the divisor to avoid a host trap. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        /* Unsigned: only divisor 0 traps; substitute 1 in that case. */
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}
1889 
/* Emit a 32-bit modulo; opc3 is documentation only. */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
static void glue(gen_, name)(DisasContext *ctx)                             \
{                                                                           \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
                      sign);                                                \
}

GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1900 
1901 #if defined(TARGET_PPC64)
/*
 * Common 64-bit modulo: ret = arg1 % arg2, signed or unsigned.
 * Mirrors gen_op_arith_modw: trapping inputs (divisor 0, signed
 * INT64_MIN % -1) get a substitute divisor; result is architecturally
 * undefined for them anyway.
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Copy the operands so that ret may alias arg1 or arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = (arg1 == INT64_MIN && arg2 == -1) || (arg2 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* If flagged, use t2 (== 1) as the divisor to avoid a host trap. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}
1928 
1929 #define GEN_INT_ARITH_MODD(name, opc3, sign)                            \
1930 static void glue(gen_, name)(DisasContext *ctx)                           \
1931 {                                                                         \
1932   gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1933                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1934                     sign);                                                \
1935 }
1936 
1937 GEN_INT_ARITH_MODD(modud, 0x08, 0);
1938 GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1939 #endif
1940 
1941 /* mulhw  mulhw. */
1942 static void gen_mulhw(DisasContext *ctx)
1943 {
1944     TCGv_i32 t0 = tcg_temp_new_i32();
1945     TCGv_i32 t1 = tcg_temp_new_i32();
1946 
1947     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1948     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1949     tcg_gen_muls2_i32(t0, t1, t0, t1);
1950     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1951     if (unlikely(Rc(ctx->opcode) != 0)) {
1952         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1953     }
1954 }
1955 
1956 /* mulhwu  mulhwu.  */
1957 static void gen_mulhwu(DisasContext *ctx)
1958 {
1959     TCGv_i32 t0 = tcg_temp_new_i32();
1960     TCGv_i32 t1 = tcg_temp_new_i32();
1961 
1962     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1963     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1964     tcg_gen_mulu2_i32(t0, t1, t0, t1);
1965     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1966     if (unlikely(Rc(ctx->opcode) != 0)) {
1967         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1968     }
1969 }
1970 
1971 /* mullw  mullw. */
1972 static void gen_mullw(DisasContext *ctx)
1973 {
1974 #if defined(TARGET_PPC64)
1975     TCGv_i64 t0, t1;
1976     t0 = tcg_temp_new_i64();
1977     t1 = tcg_temp_new_i64();
1978     tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
1979     tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
1980     tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
1981 #else
1982     tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1983                     cpu_gpr[rB(ctx->opcode)]);
1984 #endif
1985     if (unlikely(Rc(ctx->opcode) != 0)) {
1986         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1987     }
1988 }
1989 
/* mullwo  mullwo. */
/*
 * mullw with overflow detection: OV is set when the 64-bit product does
 * not fit in 32 bits, i.e. when the high half differs from the sign
 * extension of the low half.
 */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    /* On 64-bit targets RT holds the full product (low:high concat). */
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV = (high half != sign bits of low half) */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2017 
2018 /* mulli */
2019 static void gen_mulli(DisasContext *ctx)
2020 {
2021     tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2022                     SIMM(ctx->opcode));
2023 }
2024 
2025 #if defined(TARGET_PPC64)
2026 /* mulhd  mulhd. */
2027 static void gen_mulhd(DisasContext *ctx)
2028 {
2029     TCGv lo = tcg_temp_new();
2030     tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2031                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2032     if (unlikely(Rc(ctx->opcode) != 0)) {
2033         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2034     }
2035 }
2036 
2037 /* mulhdu  mulhdu. */
2038 static void gen_mulhdu(DisasContext *ctx)
2039 {
2040     TCGv lo = tcg_temp_new();
2041     tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2042                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2043     if (unlikely(Rc(ctx->opcode) != 0)) {
2044         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2045     }
2046 }
2047 
2048 /* mulld  mulld. */
2049 static void gen_mulld(DisasContext *ctx)
2050 {
2051     tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2052                    cpu_gpr[rB(ctx->opcode)]);
2053     if (unlikely(Rc(ctx->opcode) != 0)) {
2054         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2055     }
2056 }
2057 
/* mulldo  mulldo. */
/*
 * mulld with overflow detection: OV is set when the 128-bit product
 * does not fit in 64 bits, i.e. when the high half differs from the
 * sign extension of the low half.
 */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV = (high half != sign bits of low half) */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2079 #endif
2080 
/* Common subf function */
/*
 * Emit ret = ~arg1 + arg2 [+ CA] (i.e. arg2 - arg1 when no carry-in),
 * the shared body of subf/subfc/subfe/subfme/subfze/neg.
 *
 * add_ca:      include XER[CA] as carry-in
 * compute_ca:  update XER[CA]/[CA32]
 * compute_ov:  update XER[OV]/[OV32] and accumulate into SO
 * compute_rc0: update CR0 (Rc form)
 *
 * A temporary stands in for ret while CA/OV are derived, because ret
 * may alias arg1/arg2 and those flag computations need the inputs.
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Two-step add2 chain so CA captures the full carry-out. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* No carry-in: CA is simply "no borrow", i.e. arg2 >= arg1. */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Sub functions with two register operands */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo.  */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo. */
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2183 
2184 /* subfic */
2185 static void gen_subfic(DisasContext *ctx)
2186 {
2187     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
2188     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2189                       c, 0, 1, 0, 0);
2190 }
2191 
2192 /* neg neg. nego nego. */
2193 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2194 {
2195     TCGv zero = tcg_constant_tl(0);
2196     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2197                       zero, 0, 0, compute_ov, Rc(ctx->opcode));
2198 }
2199 
2200 static void gen_neg(DisasContext *ctx)
2201 {
2202     tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2203     if (unlikely(Rc(ctx->opcode))) {
2204         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2205     }
2206 }
2207 
/* nego / nego.: neg with XER[OV/OV32] update, via the subf machinery. */
static void gen_nego(DisasContext *ctx)
{
    gen_op_arith_neg(ctx, 1);
}
2212 
2213 /***                            Integer logical                            ***/
2214 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
2215 static void glue(gen_, name)(DisasContext *ctx)                               \
2216 {                                                                             \
2217     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
2218        cpu_gpr[rB(ctx->opcode)]);                                             \
2219     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2220         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2221 }
2222 
2223 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
2224 static void glue(gen_, name)(DisasContext *ctx)                               \
2225 {                                                                             \
2226     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
2227     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2228         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2229 }
2230 
/* Two-operand logicals built from the macro above. */
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2235 
2236 /* andi. */
2237 static void gen_andi_(DisasContext *ctx)
2238 {
2239     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2240                     UIMM(ctx->opcode));
2241     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2242 }
2243 
2244 /* andis. */
2245 static void gen_andis_(DisasContext *ctx)
2246 {
2247     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2248                     UIMM(ctx->opcode) << 16);
2249     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2250 }
2251 
2252 /* cntlzw */
2253 static void gen_cntlzw(DisasContext *ctx)
2254 {
2255     TCGv_i32 t = tcg_temp_new_i32();
2256 
2257     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2258     tcg_gen_clzi_i32(t, t, 32);
2259     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2260 
2261     if (unlikely(Rc(ctx->opcode) != 0)) {
2262         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2263     }
2264 }
2265 
2266 /* cnttzw */
2267 static void gen_cnttzw(DisasContext *ctx)
2268 {
2269     TCGv_i32 t = tcg_temp_new_i32();
2270 
2271     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2272     tcg_gen_ctzi_i32(t, t, 32);
2273     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2274 
2275     if (unlikely(Rc(ctx->opcode) != 0)) {
2276         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2277     }
2278 }
2279 
/* More macro-generated logicals and sign extensions. */
/* eqv & eqv. */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2289 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2290 
2291 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/*
 * Halt the current vCPU and end the TB so other vCPUs can be scheduled.
 * The store clears CPUState::halted, reached from cpu_env (which points
 * at the embedded env) via a negative offset back to the container.
 */
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_constant_i32(0);
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2302 
/* or & or. */
/*
 * Handled open-coded rather than via GEN_LOGICAL2 because
 * "or rx,rx,rx" encodings double as SMT priority hints: the rx value
 * selects a PPR priority, and on system emulation the CPU is paused so
 * spin loops using these hints do not burn host CPU.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the mr / mr. (register move) case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rx,rx,rx: only CR0 needs updating */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            /* Write the selected priority into PPR[52:50]. */
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. -- RA = RS | ~RB */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2390 
2391 /* xor & xor. */
2392 static void gen_xor(DisasContext *ctx)
2393 {
2394     /* Optimisation for "set to zero" case */
2395     if (rS(ctx->opcode) != rB(ctx->opcode)) {
2396         tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2397                        cpu_gpr[rB(ctx->opcode)]);
2398     } else {
2399         tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2400     }
2401     if (unlikely(Rc(ctx->opcode) != 0)) {
2402         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2403     }
2404 }
2405 
2406 /* ori */
2407 static void gen_ori(DisasContext *ctx)
2408 {
2409     target_ulong uimm = UIMM(ctx->opcode);
2410 
2411     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2412         return;
2413     }
2414     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2415 }
2416 
2417 /* oris */
2418 static void gen_oris(DisasContext *ctx)
2419 {
2420     target_ulong uimm = UIMM(ctx->opcode);
2421 
2422     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2423         /* NOP */
2424         return;
2425     }
2426     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2427                    uimm << 16);
2428 }
2429 
2430 /* xori */
2431 static void gen_xori(DisasContext *ctx)
2432 {
2433     target_ulong uimm = UIMM(ctx->opcode);
2434 
2435     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2436         /* NOP */
2437         return;
2438     }
2439     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2440 }
2441 
2442 /* xoris */
2443 static void gen_xoris(DisasContext *ctx)
2444 {
2445     target_ulong uimm = UIMM(ctx->opcode);
2446 
2447     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2448         /* NOP */
2449         return;
2450     }
2451     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2452                     uimm << 16);
2453 }
2454 
/* popcntb : PowerPC 2.03 specification */
/* Per-byte population count; the per-byte split lives in the helper. */
static void gen_popcntb(DisasContext *ctx)
{
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2460 
/*
 * popcntw: per-word population count.  On a 32-bit target the register
 * is a single word, so a plain ctpop suffices; on 64-bit the helper
 * counts each 32-bit half separately.
 */
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2469 
2470 #if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
/* Whole-doubleword population count maps directly onto TCG ctpop. */
static void gen_popcntd(DisasContext *ctx)
{
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2476 #endif
2477 
2478 /* prtyw: PowerPC 2.05 specification */
2479 static void gen_prtyw(DisasContext *ctx)
2480 {
2481     TCGv ra = cpu_gpr[rA(ctx->opcode)];
2482     TCGv rs = cpu_gpr[rS(ctx->opcode)];
2483     TCGv t0 = tcg_temp_new();
2484     tcg_gen_shri_tl(t0, rs, 16);
2485     tcg_gen_xor_tl(ra, rs, t0);
2486     tcg_gen_shri_tl(t0, ra, 8);
2487     tcg_gen_xor_tl(ra, ra, t0);
2488     tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
2489 }
2490 
2491 #if defined(TARGET_PPC64)
2492 /* prtyd: PowerPC 2.05 specification */
2493 static void gen_prtyd(DisasContext *ctx)
2494 {
2495     TCGv ra = cpu_gpr[rA(ctx->opcode)];
2496     TCGv rs = cpu_gpr[rS(ctx->opcode)];
2497     TCGv t0 = tcg_temp_new();
2498     tcg_gen_shri_tl(t0, rs, 32);
2499     tcg_gen_xor_tl(ra, rs, t0);
2500     tcg_gen_shri_tl(t0, ra, 16);
2501     tcg_gen_xor_tl(ra, ra, t0);
2502     tcg_gen_shri_tl(t0, ra, 8);
2503     tcg_gen_xor_tl(ra, ra, t0);
2504     tcg_gen_andi_tl(ra, ra, 1);
2505 }
2506 #endif
2507 
2508 #if defined(TARGET_PPC64)
/* bpermd */
/* Bit permutation under RS control; the gather loop lives in the helper. */
static void gen_bpermd(DisasContext *ctx)
{
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
2515 #endif
2516 
2517 #if defined(TARGET_PPC64)
/* extsw & extsw. -- RA = sign-extended low word of RS */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2520 
2521 /* cntlzd */
2522 static void gen_cntlzd(DisasContext *ctx)
2523 {
2524     tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2525     if (unlikely(Rc(ctx->opcode) != 0)) {
2526         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2527     }
2528 }
2529 
2530 /* cnttzd */
2531 static void gen_cnttzd(DisasContext *ctx)
2532 {
2533     tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2534     if (unlikely(Rc(ctx->opcode) != 0)) {
2535         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2536     }
2537 }
2538 
2539 /* darn */
2540 static void gen_darn(DisasContext *ctx)
2541 {
2542     int l = L(ctx->opcode);
2543 
2544     if (l > 2) {
2545         tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
2546     } else {
2547         translator_io_start(&ctx->base);
2548         if (l == 0) {
2549             gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
2550         } else {
2551             /* Return 64-bit random for both CRN and RRN */
2552             gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
2553         }
2554     }
2555 }
2556 #endif
2557 
2558 /***                             Integer rotate                            ***/
2559 
/* rlwimi & rlwimi. */
/*
 * Rotate-left-word-immediate-then-mask-insert: RA keeps its bits
 * outside MASK(MB,ME) and receives the rotated RS inside the mask.
 * The common case where the rotate exactly positions a contiguous
 * field is a single deposit; otherwise the rotate is done in 32 bits
 * (or on a doubled word when the mask crosses bit 32) and merged.
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    if (sh == (31 - me) && mb <= me) {
        /* The rotation is a pure left shift into the mask: one deposit. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* PPC32 mask positions map to the low word of the 64-bit MASK. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills into the high word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the word so a 64-bit rotate matches 32-bit wrap. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2610 
/* rlwinm & rlwinm. */
/*
 * Rotate-left-word-immediate-then-AND-with-mask.  Shift-left and
 * bit-field-extract encodings collapse to single TCG ops; the general
 * case rotates in 32 bits, or on a duplicated word when the mask
 * crosses bit 32 on a 64-bit target.
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* slwi pattern: shift left with zero fill. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* rlwinm used as an unsigned bit-field extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* PPC32 mask positions map to the low word of the 64-bit MASK. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* A wrapping mask (mb > me) spills into the high word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the word so a 64-bit rotate matches 32-bit wrap. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2663 
/* rlwnm & rlwnm. */
/*
 * Rotate-left-word by the amount in RB (mod 32), then AND with
 * MASK(MB,ME).  Same 32-bit vs word-duplicated 64-bit rotate split as
 * gen_rlwinm, but with a variable rotate count.
 */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* PPC32 mask positions map to the low word of the 64-bit MASK. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    /* A wrapping mask (mb > me) spills into the high word. */
    if (mask > 0xffffffffu) {
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Duplicate the word so a 64-bit rotate matches 32-bit wrap. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2711 
#if defined(TARGET_PPC64)
/*
 * Expand two entry points gen_<name>0/gen_<name>1 forwarding to
 * gen_<name>(ctx, n), where n is the split opcode bit.  opc1/opc2 are
 * kept for documentation of the opcode-table slots only.
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/*
 * Same, but expand four entry points gen_<name>0..3 covering both
 * split opcode bits: gen_<name>(ctx, bit1, bit0).
 */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2743 
2744 static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
2745 {
2746     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2747     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2748     int len = me - mb + 1;
2749     int rsh = (64 - sh) & 63;
2750 
2751     if (sh != 0 && len > 0 && me == (63 - sh)) {
2752         tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
2753     } else if (me == 63 && rsh + len <= 64) {
2754         tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
2755     } else {
2756         tcg_gen_rotli_tl(t_ra, t_rs, sh);
2757         tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2758     }
2759     if (unlikely(Rc(ctx->opcode) != 0)) {
2760         gen_set_Rc0(ctx, t_ra);
2761     }
2762 }
2763 
2764 /* rldicl - rldicl. */
2765 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2766 {
2767     uint32_t sh, mb;
2768 
2769     sh = SH(ctx->opcode) | (shn << 5);
2770     mb = MB(ctx->opcode) | (mbn << 5);
2771     gen_rldinm(ctx, mb, 63, sh);
2772 }
2773 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2774 
2775 /* rldicr - rldicr. */
2776 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2777 {
2778     uint32_t sh, me;
2779 
2780     sh = SH(ctx->opcode) | (shn << 5);
2781     me = MB(ctx->opcode) | (men << 5);
2782     gen_rldinm(ctx, 0, me, sh);
2783 }
2784 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2785 
2786 /* rldic - rldic. */
2787 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2788 {
2789     uint32_t sh, mb;
2790 
2791     sh = SH(ctx->opcode) | (shn << 5);
2792     mb = MB(ctx->opcode) | (mbn << 5);
2793     gen_rldinm(ctx, mb, 63 - sh, sh);
2794 }
2795 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2796 
2797 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2798 {
2799     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2800     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2801     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2802     TCGv t0;
2803 
2804     t0 = tcg_temp_new();
2805     tcg_gen_andi_tl(t0, t_rb, 0x3f);
2806     tcg_gen_rotl_tl(t_ra, t_rs, t0);
2807 
2808     tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2809     if (unlikely(Rc(ctx->opcode) != 0)) {
2810         gen_set_Rc0(ctx, t_ra);
2811     }
2812 }
2813 
2814 /* rldcl - rldcl. */
2815 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2816 {
2817     uint32_t mb;
2818 
2819     mb = MB(ctx->opcode) | (mbn << 5);
2820     gen_rldnm(ctx, mb, 63);
2821 }
2822 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2823 
2824 /* rldcr - rldcr. */
2825 static inline void gen_rldcr(DisasContext *ctx, int men)
2826 {
2827     uint32_t me;
2828 
2829     me = MB(ctx->opcode) | (men << 5);
2830     gen_rldnm(ctx, 0, me);
2831 }
2832 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2833 
/* rldimi - rldimi. */
/*
 * Rotate Left Doubleword Immediate then Mask Insert: rotate rS left by
 * sh and insert the result into rA under the mask mb..(63 - sh).
 */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Non-wrapping mask: a single deposit does the insert. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrapping mask: rotate, mask, and merge by hand. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: record the result in CR0. */
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2859 #endif
2860 
2861 /***                             Integer shift                             ***/
2862 
/* slw & slw. */
/*
 * Shift Left Word: rA = (rS << (rB & 0x1f)) zero-extended to 32 bits,
 * with the result forced to 0 when rB's shift count is >= 32.
 */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
    /*
     * Move the 0x20 bit of rB to the sign position and smear it:
     * t0 is all-ones when that bit is set, otherwise 0.
     */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift by the low 5 bits only; oversize counts were zeroed above. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2886 
2887 /* sraw & sraw. */
2888 static void gen_sraw(DisasContext *ctx)
2889 {
2890     gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2891                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2892     if (unlikely(Rc(ctx->opcode) != 0)) {
2893         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2894     }
2895 }
2896 
/* srawi & srawi. */
/*
 * Shift Right Algebraic Word Immediate.  CA (and CA32 on ISA 3.00) is
 * set iff the source is negative and any 1-bits are shifted out.
 */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift of zero: just sign-extend, carry is always clear. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* Collect the bits that will be shifted out... */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only if the (sign-extended) value is negative. */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        /* Normalize CA to 0/1. */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2926 
/* srw & srw. */
/*
 * Shift Right Word: rA = (rS zero-extended to 32 bits) >> (rB & 0x1f),
 * with the result forced to 0 when rB's shift count is >= 32.
 */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
    /*
     * Move the 0x20 bit of rB to the sign position and smear it:
     * t0 is all-ones when that bit is set, otherwise 0.
     */
#if defined(TARGET_PPC64)
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: clear the high part before shifting. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2950 
2951 #if defined(TARGET_PPC64)
/* sld & sld. */
/*
 * Shift Left Doubleword: rA = rS << (rB & 0x3f), with the result
 * forced to 0 when rB's shift count is >= 64.
 */
static void gen_sld(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Smear rB's 0x40 bit across t0: all-ones when set, else 0. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2969 
2970 /* srad & srad. */
2971 static void gen_srad(DisasContext *ctx)
2972 {
2973     gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2974                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2975     if (unlikely(Rc(ctx->opcode) != 0)) {
2976         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2977     }
2978 }
/* sradi & sradi. */
/*
 * Shift Right Algebraic Doubleword Immediate; n supplies the split
 * high bit of the 6-bit shift count.  CA (and CA32 on ISA 3.00) is set
 * iff the source is negative and 1-bits are shifted out.
 */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift of zero: plain copy, carry always clear. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* Collect the bits that will be shifted out... */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only if the source is negative. */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        /* Normalize CA to 0/1. */
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
3007 
/* Opcode-table entry points for the two sradi encodings (split sh bit). */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
3017 
3018 /* extswsli & extswsli. */
3019 static inline void gen_extswsli(DisasContext *ctx, int n)
3020 {
3021     int sh = SH(ctx->opcode) + (n << 5);
3022     TCGv dst = cpu_gpr[rA(ctx->opcode)];
3023     TCGv src = cpu_gpr[rS(ctx->opcode)];
3024 
3025     tcg_gen_ext32s_tl(dst, src);
3026     tcg_gen_shli_tl(dst, dst, sh);
3027     if (unlikely(Rc(ctx->opcode) != 0)) {
3028         gen_set_Rc0(ctx, dst);
3029     }
3030 }
3031 
/* Opcode-table entry points for the two extswsli encodings. */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
3041 
/* srd & srd. */
/*
 * Shift Right Doubleword: rA = rS >> (rB & 0x3f), with the result
 * forced to 0 when rB's shift count is >= 64.
 */
static void gen_srd(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    /* Smear rB's 0x40 bit across t0: all-ones when set, else 0. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
3059 #endif
3060 
3061 /***                           Addressing modes                            ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
/*
 * maskl clears the low bits of SIMM — presumably for DS/DQ instruction
 * forms whose displacement has implied zero low bits (verify against
 * callers).  In narrow (32-bit) mode the EA is truncated to 32 bits.
 */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* simm == 0: EA is just (possibly truncated) rA. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
3087 
3088 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3089 {
3090     if (rA(ctx->opcode) == 0) {
3091         if (NARROW_MODE(ctx)) {
3092             tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3093         } else {
3094             tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3095         }
3096     } else {
3097         tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3098         if (NARROW_MODE(ctx)) {
3099             tcg_gen_ext32u_tl(EA, EA);
3100         }
3101     }
3102 }
3103 
3104 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3105 {
3106     if (rA(ctx->opcode) == 0) {
3107         tcg_gen_movi_tl(EA, 0);
3108     } else if (NARROW_MODE(ctx)) {
3109         tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3110     } else {
3111         tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3112     }
3113 }
3114 
/* ret = arg1 + val, truncated to 32 bits in narrow mode. */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
3123 
/*
 * Raise an alignment interrupt for instructions that are invalid in
 * little-endian mode; the error code carries the offending opcode bits.
 */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
3129 
3130 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3131 {
3132     TCGv ea = tcg_temp_new();
3133     if (ra) {
3134         tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3135     } else {
3136         tcg_gen_mov_tl(ea, displ);
3137     }
3138     if (NARROW_MODE(ctx)) {
3139         tcg_gen_ext32u_tl(ea, ea);
3140     }
3141     return ea;
3142 }
3143 
3144 /***                             Integer load                              ***/
/* Apply the context's default byte-order; BSWAP_MEMOP flips it. */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))

/* Emit gen_qemu_<ldop>(): load into a target-long TCG value. */
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

/* Byte-reversed variants (lhbrx/lwbrx). */
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
3164 
/* Emit gen_qemu_<ldop>_i64(): load into a 64-bit TCG value. */
#define GEN_QEMU_LOAD_64(ldop, op)                                  \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
                                             TCGv_i64 val,          \
                                             TCGv addr)             \
{                                                                   \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword load (ldbrx). */
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
3182 
/* Emit gen_qemu_<stop>(): store from a target-long TCG value. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed variants (sthbrx/stwbrx). */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
3199 
/* Emit gen_qemu_<stop>_i64(): store from a 64-bit TCG value. */
#define GEN_QEMU_STORE_64(stop, op)                               \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
                                              TCGv_i64 val,       \
                                              TCGv addr)          \
{                                                                 \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
/* Byte-reversed doubleword store (stdbrx). */
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
3216 
/*
 * Emit gen_<name>x(): indexed load rD <- MEM[(rA|0) + rB], with a
 * privilege/availability check chosen by the chk argument.
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Emit gen_<name>epx(): supervisor-only load through the external-PID
 * translation regime (PPC_TLB_EPID_LOAD mmu index).
 */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
/* Cache-inhibited loads, hypervisor-real-mode only (CHK_HVRM). */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
3259 
/***                              Integer store                            ***/
/*
 * Emit gen_<name>x(): indexed store MEM[(rA|0) + rB] <- rS, with a
 * privilege/availability check chosen by the chk argument.
 */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
}
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Emit gen_<name>epx(): supervisor-only store through the external-PID
 * translation regime.  NOTE(review): the source register is fetched
 * with rD() — presumably rD and rS decode the same opcode bits; confirm.
 */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif

#if defined(TARGET_PPC64)
/* Cache-inhibited stores, hypervisor-real-mode only (CHK_HVRM). */
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
/***                Integer load and store with byte reverse               ***/
/* These expand to full gen_*x() functions via the GEN_LDX/GEN_STX macros. */

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3321 
3322 /***                    Integer load and store multiple                    ***/
3323 
3324 /* lmw */
3325 static void gen_lmw(DisasContext *ctx)
3326 {
3327     TCGv t0;
3328     TCGv_i32 t1;
3329 
3330     if (ctx->le_mode) {
3331         gen_align_no_le(ctx);
3332         return;
3333     }
3334     gen_set_access_type(ctx, ACCESS_INT);
3335     t0 = tcg_temp_new();
3336     t1 = tcg_constant_i32(rD(ctx->opcode));
3337     gen_addr_imm_index(ctx, t0, 0);
3338     gen_helper_lmw(cpu_env, t0, t1);
3339 }
3340 
3341 /* stmw */
3342 static void gen_stmw(DisasContext *ctx)
3343 {
3344     TCGv t0;
3345     TCGv_i32 t1;
3346 
3347     if (ctx->le_mode) {
3348         gen_align_no_le(ctx);
3349         return;
3350     }
3351     gen_set_access_type(ctx, ACCESS_INT);
3352     t0 = tcg_temp_new();
3353     t1 = tcg_constant_i32(rS(ctx->opcode));
3354     gen_addr_imm_index(ctx, t0, 0);
3355     gen_helper_stmw(cpu_env, t0, t1);
3356 }
3357 
3358 /***                    Integer load and store strings                     ***/
3359 
/* lswi */
/*
 * PowerPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded.  In an other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
/*
 * Load String Word Immediate: load NB bytes (NB=0 means 32) starting
 * at (rA|0) into consecutive registers beginning with rD.
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);
    int start = rD(ctx->opcode);
    int ra = rA(ctx->opcode);
    int nr;

    if (ctx->le_mode) {
        /* String ops take an alignment interrupt in LE mode. */
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    /* Number of registers consumed, 4 bytes per register. */
    nr = DIV_ROUND_UP(nb, 4);
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        /* Per the spec: rA inside the destination range is invalid. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(start);
    gen_helper_lsw(cpu_env, t0, t1, t2);
}
3395 
3396 /* lswx */
3397 static void gen_lswx(DisasContext *ctx)
3398 {
3399     TCGv t0;
3400     TCGv_i32 t1, t2, t3;
3401 
3402     if (ctx->le_mode) {
3403         gen_align_no_le(ctx);
3404         return;
3405     }
3406     gen_set_access_type(ctx, ACCESS_INT);
3407     t0 = tcg_temp_new();
3408     gen_addr_reg_index(ctx, t0);
3409     t1 = tcg_constant_i32(rD(ctx->opcode));
3410     t2 = tcg_constant_i32(rA(ctx->opcode));
3411     t3 = tcg_constant_i32(rB(ctx->opcode));
3412     gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3413 }
3414 
3415 /* stswi */
3416 static void gen_stswi(DisasContext *ctx)
3417 {
3418     TCGv t0;
3419     TCGv_i32 t1, t2;
3420     int nb = NB(ctx->opcode);
3421 
3422     if (ctx->le_mode) {
3423         gen_align_no_le(ctx);
3424         return;
3425     }
3426     gen_set_access_type(ctx, ACCESS_INT);
3427     t0 = tcg_temp_new();
3428     gen_addr_register(ctx, t0);
3429     if (nb == 0) {
3430         nb = 32;
3431     }
3432     t1 = tcg_constant_i32(nb);
3433     t2 = tcg_constant_i32(rS(ctx->opcode));
3434     gen_helper_stsw(cpu_env, t0, t1, t2);
3435 }
3436 
/* stswx */
/*
 * Store String Word Indexed: the byte count is taken at runtime from
 * the low 7 bits of XER.
 */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;

    if (ctx->le_mode) {
        /* String ops take an alignment interrupt in LE mode. */
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    /* Byte count = XER[0:6]. */
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_constant_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
}
3456 
3457 /***                        Memory synchronisation                         ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has a eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            /* ISA 3.00 variant: only store-then-load needs ordering. */
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3509 
#if !defined(CONFIG_USER_ONLY)
/*
 * Emit a runtime check for a deferred TLB flush: if tlb_need_flush is
 * nonzero, call the (global or local) flush helper.  No-op unless the
 * CPU model uses lazy TLB flushing.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Skip the helper call when no flush is pending. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
}
#else
/* User-mode emulation has no TLB to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3533 
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* isync acts as a full barrier for the purposes of TCG ordering. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    /* Leave the TB so pending state (interrupts, flags) is re-evaluated. */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3547 
/* Access size in bytes encoded in the MO_SIZE field of a MemOp. */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))
3549 
/*
 * Common code for the load-and-reserve instructions (lbarx/lharx/
 * lwarx/ldarx): load RD from EA, then record the reservation address,
 * length and loaded value for a later store-conditional to check.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    /* The load itself must be naturally aligned. */
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
}
3562 
/* Emit one load-and-reserve flavour as a thin gen_load_locked() wrapper. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}
3568 
/* lbarx, lharx, lwarx */
3570 LARX(lbarx, DEF_MEMOP(MO_UB))
3571 LARX(lharx, DEF_MEMOP(MO_UW))
3572 LARX(lwarx, DEF_MEMOP(MO_UL))
3573 
3574 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
3575                                       TCGv EA, TCGCond cond, int addend)
3576 {
3577     TCGv t = tcg_temp_new();
3578     TCGv t2 = tcg_temp_new();
3579     TCGv u = tcg_temp_new();
3580 
3581     tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
3582     tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
3583     tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
3584     tcg_gen_addi_tl(u, t, addend);
3585 
3586     /* E.g. for fetch and increment bounded... */
3587     /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
3588     tcg_gen_movcond_tl(cond, u, t, t2, u, t);
3589     tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
3590 
3591     /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
3592     tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
3593     tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
3594 }
3595 
/*
 * Common code for the lwat/ldat "atomic load and do" instructions.
 * The FC field selects the operation; RT receives the loaded value,
 * with RT+1 (and RT+2 for compare-and-swap-not-equal) used as
 * implicit source operands.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            /* Non-parallel: open-code the load/compare/store sequence. */
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                /* Word-size compare uses only the low 32 bits of RT+1. */
                tcg_gen_ext32u_tl(t1, src);
            }
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3692 
/* lwat: atomic load-and-op on a word; FC selects the operation. */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat: atomic load-and-op on a doubleword. */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3704 
/*
 * Common code for the stwat/stdat "atomic store and do" instructions.
 * The FC field selects the operation; the previous memory value
 * fetched by the atomic op is discarded.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store src to both adjacent words only if they are equal. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3767 
/* stwat: atomic store-and-op on a word; FC selects the operation. */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat: atomic store-and-op on a doubleword. */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3779 
/*
 * Common code for the store-conditional instructions (stbcx./sthcx./
 * stwcx./stdcx.): the store succeeds only if the reservation address
 * and length from the matching larx still match and memory still
 * holds the reserved value (checked with a cmpxchg).  CR0 is set to
 * XER[SO] with EQ on success; the reservation is always cleared.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *lfail;
    TCGv EA;
    TCGv cr0;
    TCGv t0;
    int rs = rS(ctx->opcode);

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();
    t0 = tcg_temp_new();

    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    /* Fail fast if the address or access size doesn't match the larx. */
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);

    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[rs], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    /* t0 holds the old memory value; EQ iff it matched the reservation. */
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* The reservation is consumed whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3810 
/* Emit one store-conditional flavour via gen_conditional_store(). */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}
3816 
3817 STCX(stbcx_, DEF_MEMOP(MO_UB))
3818 STCX(sthcx_, DEF_MEMOP(MO_UW))
3819 STCX(stwcx_, DEF_MEMOP(MO_UL))
3820 
3821 #if defined(TARGET_PPC64)
3822 /* ldarx */
3823 LARX(ldarx, DEF_MEMOP(MO_UQ))
3824 /* stdcx. */
3825 STCX(stdcx_, DEF_MEMOP(MO_UQ))
3826 
/* lqarx: load quadword and reserve.  RD must be even and distinct
 * from RA and RB, otherwise the form is invalid. */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    /* Single aligned 16-byte load, split into the two target GPRs. */
    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    /* Record the 16-byte reservation for a later stqcx. to check. */
    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3857 
/* stqcx.: store quadword conditional.  RS must be even; succeeds only
 * if the 16-byte reservation from lqarx is intact and memory still
 * holds the reserved value.  CR0 = XER[SO] | (EQ on success). */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lfail;
    TCGv EA, t0, t1;
    TCGv cr0;
    TCGv_i128 cmp, val;
    int rs = rS(ctx->opcode);

    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();

    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    /* Fail fast unless the reservation matches a 16-byte lqarx. */
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode.  */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* val now holds the old memory value; EQ iff it equals the reservation. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* The reservation is consumed whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3909 #endif /* defined(TARGET_PPC64) */
3910 
/* sync */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /* lwsync (L=1) orders only ld/ld, ld/st and st/st. */
    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3935 
/* wait: idle until an interrupt occurs; most WC variants are no-ops. */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        /* Set cpu->halted directly (negative offset reaches CPUState). */
        TCGv_i32 t0 = tcg_constant_i32(1);
        tcg_gen_st_i32(t0, cpu_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, such could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
4024 
#if defined(TARGET_PPC64)
/*
 * Power-management instructions (doze/nap/stop/sleep/rvwinkle).
 * All are hypervisor-privileged; each hands its PM level to the
 * pminsn helper and ends the TB, as the CPU is expected to sleep.
 */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_DOZE);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_NAP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_STOP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_RVWINKLE);
    gen_helper_pminsn(cpu_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
#endif /* #if defined(TARGET_PPC64) */
4106 
/* Record the address of a taken branch in the CFAR SPR, when present. */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4115 
#if defined(TARGET_PPC64)
/* Account the instructions executed by this TB to the PMU counters. */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    translator_io_start(&ctx->base);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(cpu_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
    }
  #else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
  #endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* Without TARGET_PPC64 there is no PMU instruction counting to do. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4176 
4177 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
4178 {
4179     if (unlikely(ctx->singlestep_enabled)) {
4180         return false;
4181     }
4182     return translator_use_goto_tb(&ctx->base, dest);
4183 }
4184 
4185 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4186 {
4187     if (unlikely(ctx->singlestep_enabled)) {
4188         gen_debug_exception(ctx, false);
4189     } else {
4190         /*
4191          * tcg_gen_lookup_and_goto_ptr will exit the TB if
4192          * CF_NO_GOTO_PTR is set. Count insns now.
4193          */
4194         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4195             pmu_count_insns(ctx);
4196         }
4197 
4198         tcg_gen_lookup_and_goto_ptr();
4199     }
4200 }
4201 
4202 /***                                Branch                                 ***/
4203 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
4204 {
4205     if (NARROW_MODE(ctx)) {
4206         dest = (uint32_t) dest;
4207     }
4208     if (use_goto_tb(ctx, dest)) {
4209         pmu_count_insns(ctx);
4210         tcg_gen_goto_tb(n);
4211         tcg_gen_movi_tl(cpu_nip, dest & ~3);
4212         tcg_gen_exit_tb(ctx->base.tb, n);
4213     } else {
4214         tcg_gen_movi_tl(cpu_nip, dest & ~3);
4215         gen_lookup_and_goto_ptr(ctx);
4216     }
4217 }
4218 
4219 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4220 {
4221     if (NARROW_MODE(ctx)) {
4222         nip = (uint32_t)nip;
4223     }
4224     tcg_gen_movi_tl(cpu_lr, nip);
4225 }
4226 
4227 /* b ba bl bla */
4228 static void gen_b(DisasContext *ctx)
4229 {
4230     target_ulong li, target;
4231 
4232     /* sign extend LI */
4233     li = LI(ctx->opcode);
4234     li = (li ^ 0x02000000) - 0x02000000;
4235     if (likely(AA(ctx->opcode) == 0)) {
4236         target = ctx->cia + li;
4237     } else {
4238         target = li;
4239     }
4240     if (LK(ctx->opcode)) {
4241         gen_setlr(ctx, ctx->base.pc_next);
4242     }
4243     gen_update_cfar(ctx, ctx->cia);
4244     gen_goto_tb(ctx, 0, target);
4245     ctx->base.is_jmp = DISAS_NORETURN;
4246 }
4247 
4248 #define BCOND_IM  0
4249 #define BCOND_LR  1
4250 #define BCOND_CTR 2
4251 #define BCOND_TAR 3
4252 
/*
 * Common code for the conditional branches (bc/bclr/bcctr/bctar).
 * BO bit 0x4 clear: decrement CTR and branch on its (non-)zero state
 * (BO bit 0x2 selecting which); BO bit 0x10 clear: also test the CR
 * bit selected by BI (BO bit 0x8 selecting the wanted value).
 * 'type' selects the branch target: immediate, LR, CTR or TAR.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Snapshot the target register before CTR may be decremented. */
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* Test first, then decrement ("test and decrement" order). */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal order: decrement CTR, then test the new value. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        /* BD is a 16-bit signed displacement. */
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Register-indirect target; NIP is kept 4-byte aligned. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4361 
/* bc: conditional branch with immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr: conditional branch to CTR */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr: conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar: conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4381 
4382 /***                      Condition register logical                       ***/
/*
 * Emit a CR-field logical op: crbD <- crbA <tcg_op> crbB.  Each CR
 * bit lives inside a 4-bit cpu_crf field, so both source bits are
 * shifted into crbD's bit position, combined, masked down to the one
 * target bit and merged back into the destination CR field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}
4411 
/* crand: CR[bD] = CR[bA] & CR[bB] */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc: CR[bD] = CR[bA] & ~CR[bB] */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv: CR[bD] = ~(CR[bA] ^ CR[bB]) */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand: CR[bD] = ~(CR[bA] & CR[bB]) */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor: CR[bD] = ~(CR[bA] | CR[bB]) */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror: CR[bD] = CR[bA] | CR[bB] */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc: CR[bD] = CR[bA] | ~CR[bB] */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor: CR[bD] = CR[bA] ^ CR[bB] */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4428 
/* mcrf */
static void gen_mcrf(DisasContext *ctx)
{
    /* Copy the whole 4-bit CR field crfS into crfD. */
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4434 
4435 /***                           System linkage                              ***/
4436 
/* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(cpu_env);
    /* MSR/NIP may have changed: leave the TB and re-evaluate state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4459 
4460 #if defined(TARGET_PPC64)
/* rfid (supervisor only, 64-bit) */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(cpu_env);
    /* MSR/NIP may have changed: leave the TB and re-evaluate state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4474 
4475 #if !defined(CONFIG_USER_ONLY)
4476 static void gen_rfscv(DisasContext *ctx)
4477 {
4478 #if defined(CONFIG_USER_ONLY)
4479     GEN_PRIV(ctx);
4480 #else
4481     /* Restore CPU state */
4482     CHK_SV(ctx);
4483     translator_io_start(&ctx->base);
4484     gen_update_cfar(ctx, ctx->cia);
4485     gen_helper_rfscv(cpu_env);
4486     ctx->base.is_jmp = DISAS_EXIT;
4487 #endif
4488 }
4489 #endif
4490 
/* hrfid (hypervisor only) */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    gen_helper_hrfid(cpu_env);
    /* MSR/NIP may have changed: leave the TB and re-evaluate state. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4503 #endif
4504 
4505 /* sc */
4506 #if defined(CONFIG_USER_ONLY)
4507 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4508 #else
4509 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4510 #endif
4511 static void gen_sc(DisasContext *ctx)
4512 {
4513     uint32_t lev;
4514 
4515     /*
4516      * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
4517      * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
4518      * for Ultravisor which TCG does not support, so just ignore the top 6.
4519      */
4520     lev = (ctx->opcode >> 5) & 0x1;
4521     gen_exception_err(ctx, POWERPC_SYSCALL, lev);
4522 }
4523 
4524 #if defined(TARGET_PPC64)
4525 #if !defined(CONFIG_USER_ONLY)
static void gen_scv(DisasContext *ctx)
{
    /* scv takes the full 7-bit LEV field (selects the exception vector). */
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(cpu_env, tcg_constant_i32(lev));

    /* The helper always raises an exception; nothing follows in this TB. */
    ctx->base.is_jmp = DISAS_NORETURN;
}
4536 #endif
4537 #endif
4538 
4539 /***                                Trap                                   ***/
4540 
4541 /* Check for unconditional traps (always or never) */
4542 static bool check_unconditional_trap(DisasContext *ctx)
4543 {
4544     /* Trap never */
4545     if (TO(ctx->opcode) == 0) {
4546         return true;
4547     }
4548     /* Trap always */
4549     if (TO(ctx->opcode) == 31) {
4550         gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4551         return true;
4552     }
4553     return false;
4554 }
4555 
4556 /* tw */
4557 static void gen_tw(DisasContext *ctx)
4558 {
4559     TCGv_i32 t0;
4560 
4561     if (check_unconditional_trap(ctx)) {
4562         return;
4563     }
4564     t0 = tcg_constant_i32(TO(ctx->opcode));
4565     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4566                   t0);
4567 }
4568 
4569 /* twi */
4570 static void gen_twi(DisasContext *ctx)
4571 {
4572     TCGv t0;
4573     TCGv_i32 t1;
4574 
4575     if (check_unconditional_trap(ctx)) {
4576         return;
4577     }
4578     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4579     t1 = tcg_constant_i32(TO(ctx->opcode));
4580     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4581 }
4582 
4583 #if defined(TARGET_PPC64)
4584 /* td */
4585 static void gen_td(DisasContext *ctx)
4586 {
4587     TCGv_i32 t0;
4588 
4589     if (check_unconditional_trap(ctx)) {
4590         return;
4591     }
4592     t0 = tcg_constant_i32(TO(ctx->opcode));
4593     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4594                   t0);
4595 }
4596 
4597 /* tdi */
4598 static void gen_tdi(DisasContext *ctx)
4599 {
4600     TCGv t0;
4601     TCGv_i32 t1;
4602 
4603     if (check_unconditional_trap(ctx)) {
4604         return;
4605     }
4606     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4607     t1 = tcg_constant_i32(TO(ctx->opcode));
4608     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4609 }
4610 #endif
4611 
4612 /***                          Processor control                            ***/
4613 
/* mcrxr */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /*
     * Build crfD = SO<<3 | OV<<2 | CA<<1 (bit 0 left clear).
     * Note dst doubles as scratch for CA, so the reads of cpu_so/cpu_ov
     * into t0/t1 must happen before dst is clobbered — keep this order.
     */
    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* mcrxr clears the XER bits it copied. */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4634 
4635 #ifdef TARGET_PPC64
/* mcrxrx */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* Build crfD = OV<<3 | OV32<<2 | CA<<1 | CA32 (XER bits unchanged). */
    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4653 #endif
4654 
4655 /* mfcr mfocrf */
4656 static void gen_mfcr(DisasContext *ctx)
4657 {
4658     uint32_t crm, crn;
4659 
4660     if (likely(ctx->opcode & 0x00100000)) {
4661         crm = CRM(ctx->opcode);
4662         if (likely(crm && ((crm & (crm - 1)) == 0))) {
4663             crn = ctz32(crm);
4664             tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4665             tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4666                             cpu_gpr[rD(ctx->opcode)], crn * 4);
4667         }
4668     } else {
4669         TCGv_i32 t0 = tcg_temp_new_i32();
4670         tcg_gen_mov_i32(t0, cpu_crf[0]);
4671         tcg_gen_shli_i32(t0, t0, 4);
4672         tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4673         tcg_gen_shli_i32(t0, t0, 4);
4674         tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4675         tcg_gen_shli_i32(t0, t0, 4);
4676         tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4677         tcg_gen_shli_i32(t0, t0, 4);
4678         tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4679         tcg_gen_shli_i32(t0, t0, 4);
4680         tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4681         tcg_gen_shli_i32(t0, t0, 4);
4682         tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4683         tcg_gen_shli_i32(t0, t0, 4);
4684         tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4685         tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4686     }
4687 }
4688 
/* mfmsr */
static void gen_mfmsr(DisasContext *ctx)
{
    /* Supervisor only: copy the MSR into rD. */
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4695 
/* mfspr */
/*
 * Shared SPR read path for mfspr/mftb: pick the per-SPR read callback
 * for the current privilege level and dispatch, or emit the appropriate
 * privilege/invalid behaviour when there is none.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    /* Select the callback by privilege: problem / hypervisor / supervisor. */
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4757 
/* mfspr: thin wrapper over the shared SPR read path. */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4762 
/* mftb */
/* mftb shares the SPR read path; the time-base SPRs have read callbacks. */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4768 
4769 /* mtcrf mtocrf*/
4770 static void gen_mtcrf(DisasContext *ctx)
4771 {
4772     uint32_t crm, crn;
4773 
4774     crm = CRM(ctx->opcode);
4775     if (likely((ctx->opcode & 0x00100000))) {
4776         if (crm && ((crm & (crm - 1)) == 0)) {
4777             TCGv_i32 temp = tcg_temp_new_i32();
4778             crn = ctz32(crm);
4779             tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4780             tcg_gen_shri_i32(temp, temp, crn * 4);
4781             tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4782         }
4783     } else {
4784         TCGv_i32 temp = tcg_temp_new_i32();
4785         tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4786         for (crn = 0 ; crn < 8 ; crn++) {
4787             if (crm & (1 << crn)) {
4788                     tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4789                     tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4790             }
4791         }
4792     }
4793 }
4794 
4795 /* mtmsr */
4796 #if defined(TARGET_PPC64)
/* mtmsrd (64-bit, supervisor only). */
static void gen_mtmsrd(DisasContext *ctx)
{
    /* mtmsrd only exists on book3s arch 2.x processors. */
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
4840 #endif /* defined(TARGET_PPC64) */
4841 
/* mtmsr (supervisor only): 32-bit MSR update. */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* Only the low 32 MSR bits are affected by mtmsr. */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}
4879 
/* mtspr */
/*
 * SPR write path: pick the per-SPR write callback for the current
 * privilege level and dispatch, or emit the appropriate
 * privilege/invalid behaviour when there is none.
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    /* Select the callback by privilege: problem / hypervisor / supervisor. */
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4936 
4937 #if defined(TARGET_PPC64)
/* setb */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /*
     * CR field layout: LT=0x8, GT=0x4, EQ=0x2, SO=0x1.
     * t0 = (crf >= 4) -> 1 when LT or GT is set, else 0;
     * then override with -1 when crf >= 8 (LT set).
     * Result: LT -> -1, GT (without LT) -> 1, otherwise 0.
     */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
4950 #endif
4951 
4952 /***                         Cache management                              ***/
4953 
/* dcbf */
static void gen_dcbf(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy load: value discarded, only MMU fault side effects matter. */
    gen_qemu_ld8u(ctx, t0, t0);
}
4964 
/* dcbfep (external PID dcbf) */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy load through the external-PID TLB index; value discarded. */
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
4976 
/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    /* Load-then-store of the same byte to get store fault semantics. */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
4995 
/* dcbst */
static void gen_dcbst(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy load: value discarded, only MMU fault side effects matter. */
    gen_qemu_ld8u(ctx, t0, t0);
}
5006 
/* dcbstep (dcbstep External PID version) */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Dummy load through the external-PID TLB index; value discarded. */
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
}
5017 
/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5027 
/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5037 
/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5047 
/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5057 
/* dcbtls */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache */
    /* Report the failure to the guest by setting L1CSR0[CUL]. */
    TCGv t0 = tcg_temp_new();
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
}
5067 
/* dcblc */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     */
}
5075 
/* dcbz */
static void gen_dcbz(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* Pass the opcode-specific bits so the helper can pick the line size. */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
}
5088 
/* dcbzep */
static void gen_dcbzep(DisasContext *ctx)
{
    TCGv tcgv_addr;
    TCGv_i32 tcgv_op;

    gen_set_access_type(ctx, ACCESS_CACHE);
    tcgv_addr = tcg_temp_new();
    /* Pass the opcode-specific bits so the helper can pick the line size. */
    tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
    gen_addr_reg_index(ctx, tcgv_addr);
    gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
}
5101 
5102 /* dst / dstt */
5103 static void gen_dst(DisasContext *ctx)
5104 {
5105     if (rA(ctx->opcode) == 0) {
5106         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5107     } else {
5108         /* interpreted as no-op */
5109     }
5110 }
5111 
5112 /* dstst /dststt */
5113 static void gen_dstst(DisasContext *ctx)
5114 {
5115     if (rA(ctx->opcode) == 0) {
5116         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5117     } else {
5118         /* interpreted as no-op */
5119     }
5120 
5121 }
5122 
/* dss / dssall */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5128 
5129 /* icbi */
5130 static void gen_icbi(DisasContext *ctx)
5131 {
5132     TCGv t0;
5133     gen_set_access_type(ctx, ACCESS_CACHE);
5134     t0 = tcg_temp_new();
5135     gen_addr_reg_index(ctx, t0);
5136     gen_helper_icbi(cpu_env, t0);
5137 }
5138 
5139 /* icbiep */
5140 static void gen_icbiep(DisasContext *ctx)
5141 {
5142     TCGv t0;
5143     gen_set_access_type(ctx, ACCESS_CACHE);
5144     t0 = tcg_temp_new();
5145     gen_addr_reg_index(ctx, t0);
5146     gen_helper_icbiep(cpu_env, t0);
5147 }
5148 
5149 /* Optional: */
5150 /* dcba */
5151 static void gen_dcba(DisasContext *ctx)
5152 {
5153     /*
5154      * interpreted as no-op
5155      * XXX: specification say this is treated as a store by the MMU
5156      *      but does not generate any exception
5157      */
5158 }
5159 
5160 /***                    Segment register manipulation                      ***/
5161 /* Supervisor only: */
5162 
/* mfsr */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* SR number taken directly from the opcode field. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5176 
/* mfsrin */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* SR number comes from rB bits 31:28 (top nibble of the EA). */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5191 
/* mtsr */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* SR number taken directly from the opcode field. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5205 
5206 /* mtsrin */
5207 static void gen_mtsrin(DisasContext *ctx)
5208 {
5209 #if defined(CONFIG_USER_ONLY)
5210     GEN_PRIV(ctx);
5211 #else
5212     TCGv t0;
5213     CHK_SV(ctx);
5214 
5215     t0 = tcg_temp_new();
5216     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5217     gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
5218 #endif /* defined(CONFIG_USER_ONLY) */
5219 }
5220 
5221 #if defined(TARGET_PPC64)
5222 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5223 
/* mfsr */
/* 64-bit "bridge" variant: SRs are emulated on top of the SLB. */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5237 
/* mfsrin */
/* 64-bit "bridge" variant: SR number from rB bits 31:28. */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5252 
/* mtsr */
/* 64-bit "bridge" variant: SRs are emulated on top of the SLB. */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5266 
/* mtsrin */
/* 64-bit "bridge" variant: SR number from rB bits 31:28. */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5281 
5282 #endif /* defined(TARGET_PPC64) */
5283 
5284 /***                      Lookaside buffer management                      ***/
5285 /* Optional & supervisor only: */
5286 
/* tlbia */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Hypervisor privileged: flush the whole TLB. */
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif  /* defined(CONFIG_USER_ONLY) */
}
5298 
/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5318 
5319 /***                              External control                         ***/
5320 /* Optional: */
5321 
/* eciwx */
static void gen_eciwx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Aligned 32-bit external-control load into rD. */
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5333 
/* ecowx */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Aligned 32-bit external-control store; rD and rS extract the
     * same opcode bits, so rD here names the source register. */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
}
5345 
5346 /* 602 - 603 - G2 TLB management */
5347 
/* tlbld */
/* 6xx/G2 software TLB reload: load a data TLB entry for the EA in rB. */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5358 
/* tlbli */
/* 6xx/G2 software TLB reload: load an instruction TLB entry for rB. */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5369 
5370 /* BookE specific instructions */
5371 
/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5378 
5379 /* XXX: not implemented on 440 ? */
5380 static void gen_tlbiva(DisasContext *ctx)
5381 {
5382 #if defined(CONFIG_USER_ONLY)
5383     GEN_PRIV(ctx);
5384 #else
5385     TCGv t0;
5386 
5387     CHK_SV(ctx);
5388     t0 = tcg_temp_new();
5389     gen_addr_reg_index(ctx, t0);
5390     gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5391 #endif /* defined(CONFIG_USER_ONLY) */
5392 }
5393 
5394 /* All 405 MAC instructions are translated here */
5395 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5396                                         int ra, int rb, int rt, int Rc)
5397 {
5398     TCGv t0, t1;
5399 
5400     t0 = tcg_temp_new();
5401     t1 = tcg_temp_new();
5402 
5403     switch (opc3 & 0x0D) {
5404     case 0x05:
5405         /* macchw    - macchw.    - macchwo   - macchwo.   */
5406         /* macchws   - macchws.   - macchwso  - macchwso.  */
5407         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
5408         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
5409         /* mulchw - mulchw. */
5410         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5411         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5412         tcg_gen_ext16s_tl(t1, t1);
5413         break;
5414     case 0x04:
5415         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
5416         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
5417         /* mulchwu - mulchwu. */
5418         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5419         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5420         tcg_gen_ext16u_tl(t1, t1);
5421         break;
5422     case 0x01:
5423         /* machhw    - machhw.    - machhwo   - machhwo.   */
5424         /* machhws   - machhws.   - machhwso  - machhwso.  */
5425         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
5426         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
5427         /* mulhhw - mulhhw. */
5428         tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5429         tcg_gen_ext16s_tl(t0, t0);
5430         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5431         tcg_gen_ext16s_tl(t1, t1);
5432         break;
5433     case 0x00:
5434         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
5435         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
5436         /* mulhhwu - mulhhwu. */
5437         tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5438         tcg_gen_ext16u_tl(t0, t0);
5439         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5440         tcg_gen_ext16u_tl(t1, t1);
5441         break;
5442     case 0x0D:
5443         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
5444         /* maclhws   - maclhws.   - maclhwso  - maclhwso.  */
5445         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
5446         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
5447         /* mullhw - mullhw. */
5448         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5449         tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5450         break;
5451     case 0x0C:
5452         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
5453         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
5454         /* mullhwu - mullhwu. */
5455         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5456         tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5457         break;
5458     }
5459     if (opc2 & 0x04) {
5460         /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5461         tcg_gen_mul_tl(t1, t0, t1);
5462         if (opc2 & 0x02) {
5463             /* nmultiply-and-accumulate (0x0E) */
5464             tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5465         } else {
5466             /* multiply-and-accumulate (0x0C) */
5467             tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5468         }
5469 
5470         if (opc3 & 0x12) {
5471             /* Check overflow and/or saturate */
5472             TCGLabel *l1 = gen_new_label();
5473 
5474             if (opc3 & 0x10) {
5475                 /* Start with XER OV disabled, the most likely case */
5476                 tcg_gen_movi_tl(cpu_ov, 0);
5477             }
5478             if (opc3 & 0x01) {
5479                 /* Signed */
5480                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5481                 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5482                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5483                 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5484                 if (opc3 & 0x02) {
5485                     /* Saturate */
5486                     tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5487                     tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5488                 }
5489             } else {
5490                 /* Unsigned */
5491                 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5492                 if (opc3 & 0x02) {
5493                     /* Saturate */
5494                     tcg_gen_movi_tl(t0, UINT32_MAX);
5495                 }
5496             }
5497             if (opc3 & 0x10) {
5498                 /* Check overflow */
5499                 tcg_gen_movi_tl(cpu_ov, 1);
5500                 tcg_gen_movi_tl(cpu_so, 1);
5501             }
5502             gen_set_label(l1);
5503             tcg_gen_mov_tl(cpu_gpr[rt], t0);
5504         }
5505     } else {
5506         tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5507     }
5508     if (unlikely(Rc) != 0) {
5509         /* Update Rc0 */
5510         gen_set_Rc0(ctx, cpu_gpr[rt]);
5511     }
5512 }
5513 
/*
 * PowerPC 405 multiply-accumulate / multiply-halfword family.
 * Each handler expands to a one-line wrapper forwarding the fixed
 * opc2/opc3 selectors plus the decoded rA/rB/rD/Rc opcode fields to
 * gen_405_mulladd_insn().
 */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}

/* macchw    - macchw.    */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo   - macchwo.   */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws   - macchws.   */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso  - macchwso.  */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu  - macchwsu.  */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu   - macchwu.   */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo  - macchwuo.  */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw    - machhw.    */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo   - machhwo.   */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws   - machhws.   */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso  - machhwso.  */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu  - machhwsu.  */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu   - machhwu.   */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo  - machhwuo.  */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw    - maclhw.    */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo   - maclhwo.   */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws   - maclhws.   */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso  - maclhwso.  */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu   - maclhwu.   */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo  - maclhwuo.  */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu  - maclhwsu.  */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw   - nmacchw.   */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo  - nmacchwo.  */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws  - nmacchws.  */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw   - nmachhw.   */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo  - nmachhwo.  */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws  - nmachhws.  */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw   - nmaclhw.   */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo  - nmaclhwo.  */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws  - nmaclhws.  */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw  - mulchw.  */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw  - mulhhw.  */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw  - mullhw.  */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5606 
5607 /* mfdcr */
5608 static void gen_mfdcr(DisasContext *ctx)
5609 {
5610 #if defined(CONFIG_USER_ONLY)
5611     GEN_PRIV(ctx);
5612 #else
5613     TCGv dcrn;
5614 
5615     CHK_SV(ctx);
5616     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5617     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
5618 #endif /* defined(CONFIG_USER_ONLY) */
5619 }
5620 
5621 /* mtdcr */
5622 static void gen_mtdcr(DisasContext *ctx)
5623 {
5624 #if defined(CONFIG_USER_ONLY)
5625     GEN_PRIV(ctx);
5626 #else
5627     TCGv dcrn;
5628 
5629     CHK_SV(ctx);
5630     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5631     gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
5632 #endif /* defined(CONFIG_USER_ONLY) */
5633 }
5634 
/* mfdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number is taken from GPR rA at runtime. */
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}

/* mtdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number is taken from GPR rA at runtime. */
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5662 
/* dccci */
static void gen_dccci(DisasContext *ctx)
{
    /* Privileged cache-control op; treated as a no-op in TCG. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* dcread */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /*
     * The loaded value is discarded; the load is presumably issued only
     * for its MMU/fault side effects (TODO confirm) — rD receives the
     * effective address itself.
     */
    gen_qemu_ld32u(ctx, val, EA);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5687 
/* icbt (40x variant) */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}

/* iccci */
static void gen_iccci(DisasContext *ctx)
{
    /* Privileged (CHK_SV); no cache state to invalidate in TCG. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}

/* icread */
static void gen_icread(DisasContext *ctx)
{
    /* Privileged (CHK_SV); instruction-cache read has no TCG effect. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5711 
/* rfci (supervisor only) */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    /* The helper rewrites machine state, so end the translation block. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    /* The helper rewrites machine state, so end the translation block. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* BookE specific */

/* XXX: not implemented on 440 ? */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    /* The helper rewrites machine state, so end the translation block. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* XXX: not implemented on 440 ? */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    /* The helper rewrites machine state, so end the translation block. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5764 
/* TLB management - PowerPC 405 implementation */

/* tlbre */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* rB selects which word of the TLB entry is read (0 or 1). */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx. */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        /* CR0 = SO; additionally set bit 0x2 when the result is not -1. */
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* rB selects which word of the TLB entry is written (0 or 1). */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5835 
/* TLB management - PowerPC 440 implementation */

/* tlbre */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* rB selects which word (0..2) of the TLB entry is read. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbsx - tlbsx. */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    if (Rc(ctx->opcode)) {
        /* CR0 = SO; additionally set bit 0x2 when the result is not -1. */
        TCGLabel *l1 = gen_new_label();
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* rB selects which word (0..2) of the TLB entry is written. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5908 
5909 /* TLB management - PowerPC BookE 2.06 implementation */
5910 
5911 /* tlbre */
5912 static void gen_tlbre_booke206(DisasContext *ctx)
5913 {
5914  #if defined(CONFIG_USER_ONLY)
5915     GEN_PRIV(ctx);
5916 #else
5917    CHK_SV(ctx);
5918     gen_helper_booke206_tlbre(cpu_env);
5919 #endif /* defined(CONFIG_USER_ONLY) */
5920 }
5921 
/* tlbsx - tlbsx. */
static void gen_tlbsx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* EA = (rA ? rA : 0) + rB; avoid a temp when rA is 0. */
    if (rA(ctx->opcode)) {
        t0 = tcg_temp_new();
        tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    } else {
        t0 = cpu_gpr[rB(ctx->opcode)];
    }
    gen_helper_booke206_tlbsx(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

/* tlbwe */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_tlbivax_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_booke206_tlbivax(cpu_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}

static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    /* Opcode bits [21:22] select the invalidation variant (2 is reserved). */
    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(cpu_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(cpu_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(cpu_env, t0);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5993 
/* wrtee */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Splice only the EE bit from rD into MSR, leaving all other bits. */
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}

/* wrteei */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Opcode mask 0x8000 carries the immediate EE value. */
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6033 
6034 /* PowerPC 440 specific instructions */
6035 
/* dlmzb */
static void gen_dlmzb(DisasContext *ctx)
{
    /* The Rc bit is passed down to the helper as an immediate flag. */
    TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
    gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
}

/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* interpreted as no-op */
}

/* msync replaces sync on 440 */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}

/* icbt (440 variant) */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
6070 
6071 #if defined(TARGET_PPC64)
6072 static void gen_maddld(DisasContext *ctx)
6073 {
6074     TCGv_i64 t1 = tcg_temp_new_i64();
6075 
6076     tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
6077     tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
6078 }
6079 
/* maddhd maddhdu */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* The Rc opcode bit selects the unsigned form (maddhdu). */
    if (Rc(ctx->opcode)) {
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        /* Sign-extend RC so the 128-bit addition is signed. */
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /*
     * 128-bit add of (hi:lo) + (t1:RC); the high doubleword of the sum
     * lands in RD, the low doubleword (written to t1) is discarded.
     */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
6099 #endif /* defined(TARGET_PPC64) */
6100 
static void gen_tbegin(DisasContext *ctx)
{
    /* Facility-unavailable interrupt when transactional memory is off. */
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(cpu_env);
}
6109 
#define GEN_TM_NOOP(name)                                      \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails in QEMU, these user         \
     * space instructions all have a simple implementation:    \
     *                                                         \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                       \
     *           = 0b0 || 0b00    || 0b0                       \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

/* All user-space TM operations reduce to "set CR0 = 0" (see above). */
GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6134 
/* cp_abort: nothing to abort while the copy-paste facility is a stub. */
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}

#define GEN_CP_PASTE_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)          \
{                                                         \
    /*                                                    \
     * Generate invalid exception until we have an        \
     * implementation of the copy paste facility          \
     */                                                   \
    gen_invalid(ctx);                                     \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6152 
static void gen_tcheck(DisasContext *ctx)
{
    /* Facility-unavailable interrupt when transactional memory is off. */
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6168 
#if defined(CONFIG_USER_ONLY)
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    gen_priv_opc(ctx);                                         \
}

#else

#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    CHK_SV(ctx);                                               \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails, the implementation is      \
     * simple:                                                 \
     *                                                         \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                         \
     *         = 0b0 || 0b00 | 0b0                             \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

#endif

/* Privileged TM operations: always a privilege fault in user mode. */
GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6200 
/* Load FPR @regno from env into @dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
}

/* Store @src into FPR @regno and zero doubleword 1 of the backing VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}

/* Load the high or low doubleword of Altivec register @regno into @dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
}

/* Store @src into the high or low doubleword of Altivec register @regno. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
}
6228 
6229 /*
6230  * Helpers for decodetree used by !function for decoding arguments.
6231  */
6232 static int times_2(DisasContext *ctx, int x)
6233 {
6234     return x * 2;
6235 }
6236 
6237 static int times_4(DisasContext *ctx, int x)
6238 {
6239     return x * 4;
6240 }
6241 
6242 static int times_16(DisasContext *ctx, int x)
6243 {
6244     return x * 16;
6245 }
6246 
/*
 * Compose a doubleword EA: deposit the 6-bit field @x at bit 3 of the
 * constant 0xfffffffffffffe00 (bits 0..2 and 9..63 of the base kept).
 */
static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}
6251 
6252 /*
6253  * Helpers for trans_* functions to check for specific insns flags.
6254  * Use token pasting to ensure that we use the proper flag with the
6255  * proper variable.
6256  */
6257 #define REQUIRE_INSNS_FLAGS(CTX, NAME) \
6258     do {                                                \
6259         if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
6260             return false;                               \
6261         }                                               \
6262     } while (0)
6263 
6264 #define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
6265     do {                                                \
6266         if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
6267             return false;                               \
6268         }                                               \
6269     } while (0)
6270 
6271 /* Then special-case the check for 64-bit so that we elide code for ppc32. */
6272 #if TARGET_LONG_BITS == 32
6273 # define REQUIRE_64BIT(CTX)  return false
6274 #else
6275 # define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
6276 #endif
6277 
6278 #define REQUIRE_VECTOR(CTX)                             \
6279     do {                                                \
6280         if (unlikely(!(CTX)->altivec_enabled)) {        \
6281             gen_exception((CTX), POWERPC_EXCP_VPU);     \
6282             return true;                                \
6283         }                                               \
6284     } while (0)
6285 
6286 #define REQUIRE_VSX(CTX)                                \
6287     do {                                                \
6288         if (unlikely(!(CTX)->vsx_enabled)) {            \
6289             gen_exception((CTX), POWERPC_EXCP_VSXU);    \
6290             return true;                                \
6291         }                                               \
6292     } while (0)
6293 
6294 #define REQUIRE_FPU(ctx)                                \
6295     do {                                                \
6296         if (unlikely(!(ctx)->fpu_enabled)) {            \
6297             gen_exception((ctx), POWERPC_EXCP_FPU);     \
6298             return true;                                \
6299         }                                               \
6300     } while (0)
6301 
6302 #if !defined(CONFIG_USER_ONLY)
6303 #define REQUIRE_SV(CTX)             \
6304     do {                            \
6305         if (unlikely((CTX)->pr)) {  \
6306             gen_priv_opc(CTX);      \
6307             return true;            \
6308         }                           \
6309     } while (0)
6310 
6311 #define REQUIRE_HV(CTX)                             \
6312     do {                                            \
6313         if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
6314             gen_priv_opc(CTX);                      \
6315             return true;                            \
6316         }                                           \
6317     } while (0)
6318 #else
6319 #define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
6320 #define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
6321 #endif
6322 
6323 /*
6324  * Helpers for implementing sets of trans_* functions.
6325  * Defer the implementation of NAME to FUNC, with optional extra arguments.
6326  */
6327 #define TRANS(NAME, FUNC, ...) \
6328     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6329     { return FUNC(ctx, a, __VA_ARGS__); }
6330 #define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
6331     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6332     {                                                          \
6333         REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
6334         return FUNC(ctx, a, __VA_ARGS__);                      \
6335     }
6336 #define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6337     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6338     {                                                          \
6339         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6340         return FUNC(ctx, a, __VA_ARGS__);                      \
6341     }
6342 
6343 #define TRANS64(NAME, FUNC, ...) \
6344     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6345     { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
6346 #define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6347     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6348     {                                                          \
6349         REQUIRE_64BIT(ctx);                                    \
6350         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6351         return FUNC(ctx, a, __VA_ARGS__);                      \
6352     }
6353 
6354 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6355 
6356 
6357 #include "decode-insn32.c.inc"
6358 #include "decode-insn64.c.inc"
6359 #include "power8-pmu-regs.c.inc"
6360 
6361 /*
6362  * Incorporate CIA into the constant when R=1.
6363  * Validate that when R=1, RA=0.
6364  */
static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
{
    d->rt = a->rt;
    d->ra = a->ra;
    d->si = a->si;
    if (a->r) {
        /* R=1 together with RA!=0 is an invalid form. */
        if (unlikely(a->ra != 0)) {
            gen_invalid(ctx);
            return false;
        }
        /* PC-relative: fold the current instruction address into SI. */
        d->si += ctx->cia;
    }
    return true;
}
6379 
6380 #include "translate/fixedpoint-impl.c.inc"
6381 
6382 #include "translate/fp-impl.c.inc"
6383 
6384 #include "translate/vmx-impl.c.inc"
6385 
6386 #include "translate/vsx-impl.c.inc"
6387 
6388 #include "translate/dfp-impl.c.inc"
6389 
6390 #include "translate/spe-impl.c.inc"
6391 
6392 #include "translate/branch-impl.c.inc"
6393 
6394 #include "translate/processor-ctrl-impl.c.inc"
6395 
6396 #include "translate/storage-ctrl-impl.c.inc"
6397 
6398 /* Handles lfdp */
6399 static void gen_dform39(DisasContext *ctx)
6400 {
6401     if ((ctx->opcode & 0x3) == 0) {
6402         if (ctx->insns_flags2 & PPC2_ISA205) {
6403             return gen_lfdp(ctx);
6404         }
6405     }
6406     return gen_invalid(ctx);
6407 }
6408 
6409 /* Handles stfdp */
6410 static void gen_dform3D(DisasContext *ctx)
6411 {
6412     if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6413         /* stfdp */
6414         if (ctx->insns_flags2 & PPC2_ISA205) {
6415             return gen_stfdp(ctx);
6416         }
6417     }
6418     return gen_invalid(ctx);
6419 }
6420 
6421 #if defined(TARGET_PPC64)
/* brd: byte-reverse the full doubleword of rS into rA */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
6427 
6428 /* brw */
6429 static void gen_brw(DisasContext *ctx)
6430 {
6431     tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
6432     tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);
6433 
6434 }
6435 
/* brh: byte-reverse each 16-bit halfword of rS into rA */
static void gen_brh(DisasContext *ctx)
{
    /* mask selects the low byte of every halfword */
    TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    /* t2 = each halfword's high byte moved into the low position */
    tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
    tcg_gen_and_i64(t2, t1, mask);
    /* t1 = each halfword's low byte moved into the high position */
    tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_shli_i64(t1, t1, 8);
    /* recombine the two byte lanes */
    tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
}
6449 #endif
6450 
/*
 * Table for the legacy (non-decodetree) decoder.  Each entry matches on
 * opc1/opc2/opc3 plus a mask of bits that must be zero, and is gated on
 * a PPC_* insns flag (and optionally a PPC2_* insns_flags2 flag).
 */
static opcode_t opcodes[] = {
#if defined(TARGET_PPC64)
GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
#endif
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
#if defined(TARGET_PPC64)
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
#endif
/* handles lfdp, lxsd, lxssp */
GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
/* handles stfdp, stxsd, stxssp */
GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
#endif
GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
/* ISA v3.0 changed the extended opcode from 62 to 30 */
GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
#if !defined(CONFIG_USER_ONLY)
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
#endif
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
#if defined(TARGET_PPC64)
GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
             PPC_SEGMENT_64B),
GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
             PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
/*
 * XXX Those instructions will need to be handled differently for
 * different ISA versions
 */
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
              PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
               PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
             PPC_440_SPEC),
GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
              PPC2_ISA300),
GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
#endif

/* Integer arithmetic: add family */
#undef GEN_INT_ARITH_ADD
#undef GEN_INT_ARITH_ADD_CONST
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)

/* 32-bit integer division and modulo */
#undef GEN_INT_ARITH_DIVW
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#if defined(TARGET_PPC64)
/* 64-bit integer division, modulo and multiply */
#undef GEN_INT_ARITH_DIVD
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),

GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#undef GEN_INT_ARITH_MUL_HELPER
#define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
#endif

/* Integer arithmetic: subtract-from family */
#undef GEN_INT_ARITH_SUBF
#undef GEN_INT_ARITH_SUBF_CONST
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

/* Logical and sign-extension ops */
#undef GEN_LOGICAL1
#undef GEN_LOGICAL2
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
#endif

#if defined(TARGET_PPC64)
/* 64-bit rotate insns: R2/R4 expand the split sub-opcode variants */
#undef GEN_PPC64_R2
#undef GEN_PPC64_R4
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B)
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
             PPC_64B)
GEN_PPC64_R4(rldicl, 0x1E, 0x00),
GEN_PPC64_R4(rldicr, 0x1E, 0x02),
GEN_PPC64_R4(rldic, 0x1E, 0x04),
GEN_PPC64_R2(rldcl, 0x1E, 0x08),
GEN_PPC64_R2(rldcr, 0x1E, 0x09),
GEN_PPC64_R4(rldimi, 0x1E, 0x06),
#endif

#undef GEN_LDX_E
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),

#if defined(TARGET_PPC64)
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)

/* HV/P7 and later only */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)

/* External PID based load */
#undef GEN_LDEPX
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#undef GEN_STX_E
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),

#if defined(TARGET_PPC64)
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)

/* External PID based store */
#undef GEN_STEPX
#define GEN_STEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif

/* Condition-register logical ops */
#undef GEN_CRLOGIC
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),

/* PowerPC 405 multiply-accumulate family */
#undef GEN_MAC_HANDLER
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),

/* Transactional memory (PPC2_TM) */
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend,   "tend",   0x1F, 0x0E, 0x15, 0x01FFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
               PPC_NONE, PPC2_TM),

#include "translate/fp-ops.c.inc"

#include "translate/vmx-ops.c.inc"

#include "translate/vsx-ops.c.inc"

#include "translate/spe-ops.c.inc"
};
6947 
6948 /*****************************************************************************/
/*
 * Opcode types: the low bits of a dispatch-table entry tag whether the
 * stored pointer is a direct handler or a nested (indirect) opcode table.
 */
enum {
    PPC_DIRECT   = 0, /* Opcode routine        */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};
6954 
6955 #define PPC_OPCODE_MASK 0x3
6956 
6957 static inline int is_indirect_opcode(void *handler)
6958 {
6959     return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
6960 }
6961 
6962 static inline opc_handler_t **ind_table(void *handler)
6963 {
6964     return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6965 }
6966 
6967 /* Instruction table creation */
6968 /* Opcodes tables creation */
6969 static void fill_new_table(opc_handler_t **table, int len)
6970 {
6971     int i;
6972 
6973     for (i = 0; i < len; i++) {
6974         table[i] = &invalid_handler;
6975     }
6976 }
6977 
6978 static int create_new_table(opc_handler_t **table, unsigned char idx)
6979 {
6980     opc_handler_t **tmp;
6981 
6982     tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
6983     fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
6984     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
6985 
6986     return 0;
6987 }
6988 
6989 static int insert_in_table(opc_handler_t **table, unsigned char idx,
6990                             opc_handler_t *handler)
6991 {
6992     if (table[idx] != &invalid_handler) {
6993         return -1;
6994     }
6995     table[idx] = handler;
6996 
6997     return 0;
6998 }
6999 
7000 static int register_direct_insn(opc_handler_t **ppc_opcodes,
7001                                 unsigned char idx, opc_handler_t *handler)
7002 {
7003     if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
7004         printf("*** ERROR: opcode %02x already assigned in main "
7005                "opcode table\n", idx);
7006         return -1;
7007     }
7008 
7009     return 0;
7010 }
7011 
7012 static int register_ind_in_table(opc_handler_t **table,
7013                                  unsigned char idx1, unsigned char idx2,
7014                                  opc_handler_t *handler)
7015 {
7016     if (table[idx1] == &invalid_handler) {
7017         if (create_new_table(table, idx1) < 0) {
7018             printf("*** ERROR: unable to create indirect table "
7019                    "idx=%02x\n", idx1);
7020             return -1;
7021         }
7022     } else {
7023         if (!is_indirect_opcode(table[idx1])) {
7024             printf("*** ERROR: idx %02x already assigned to a direct "
7025                    "opcode\n", idx1);
7026             return -1;
7027         }
7028     }
7029     if (handler != NULL &&
7030         insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
7031         printf("*** ERROR: opcode %02x already assigned in "
7032                "opcode table %02x\n", idx2, idx1);
7033         return -1;
7034     }
7035 
7036     return 0;
7037 }
7038 
/*
 * Register a two-level (opc1/opc2) instruction.  Thin wrapper around
 * register_ind_in_table(), kept for symmetry with the deeper
 * register_dblind_insn()/register_trplind_insn() variants.
 */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
7045 
7046 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
7047                                 unsigned char idx1, unsigned char idx2,
7048                                 unsigned char idx3, opc_handler_t *handler)
7049 {
7050     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7051         printf("*** ERROR: unable to join indirect table idx "
7052                "[%02x-%02x]\n", idx1, idx2);
7053         return -1;
7054     }
7055     if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7056                               handler) < 0) {
7057         printf("*** ERROR: unable to insert opcode "
7058                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7059         return -1;
7060     }
7061 
7062     return 0;
7063 }
7064 
7065 static int register_trplind_insn(opc_handler_t **ppc_opcodes,
7066                                  unsigned char idx1, unsigned char idx2,
7067                                  unsigned char idx3, unsigned char idx4,
7068                                  opc_handler_t *handler)
7069 {
7070     opc_handler_t **table;
7071 
7072     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7073         printf("*** ERROR: unable to join indirect table idx "
7074                "[%02x-%02x]\n", idx1, idx2);
7075         return -1;
7076     }
7077     table = ind_table(ppc_opcodes[idx1]);
7078     if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
7079         printf("*** ERROR: unable to join 2nd-level indirect table idx "
7080                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7081         return -1;
7082     }
7083     table = ind_table(table[idx2]);
7084     if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
7085         printf("*** ERROR: unable to insert opcode "
7086                "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
7087         return -1;
7088     }
7089     return 0;
7090 }
7091 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7092 {
7093     if (insn->opc2 != 0xFF) {
7094         if (insn->opc3 != 0xFF) {
7095             if (insn->opc4 != 0xFF) {
7096                 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7097                                           insn->opc3, insn->opc4,
7098                                           &insn->handler) < 0) {
7099                     return -1;
7100                 }
7101             } else {
7102                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7103                                          insn->opc3, &insn->handler) < 0) {
7104                     return -1;
7105                 }
7106             }
7107         } else {
7108             if (register_ind_insn(ppc_opcodes, insn->opc1,
7109                                   insn->opc2, &insn->handler) < 0) {
7110                 return -1;
7111             }
7112         }
7113     } else {
7114         if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7115             return -1;
7116         }
7117     }
7118 
7119     return 0;
7120 }
7121 
7122 static int test_opcode_table(opc_handler_t **table, int len)
7123 {
7124     int i, count, tmp;
7125 
7126     for (i = 0, count = 0; i < len; i++) {
7127         /* Consistency fixup */
7128         if (table[i] == NULL) {
7129             table[i] = &invalid_handler;
7130         }
7131         if (table[i] != &invalid_handler) {
7132             if (is_indirect_opcode(table[i])) {
7133                 tmp = test_opcode_table(ind_table(table[i]),
7134                     PPC_CPU_INDIRECT_OPCODES_LEN);
7135                 if (tmp == 0) {
7136                     free(table[i]);
7137                     table[i] = &invalid_handler;
7138                 } else {
7139                     count++;
7140                 }
7141             } else {
7142                 count++;
7143             }
7144         }
7145     }
7146 
7147     return count;
7148 }
7149 
7150 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7151 {
7152     if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7153         printf("*** WARNING: no opcode defined !\n");
7154     }
7155 }
7156 
7157 /*****************************************************************************/
7158 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7159 {
7160     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7161     opcode_t *opc;
7162 
7163     fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7164     for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7165         if (((opc->handler.type & pcc->insns_flags) != 0) ||
7166             ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7167             if (register_insn(cpu->opcodes, opc) < 0) {
7168                 error_setg(errp, "ERROR initializing PowerPC instruction "
7169                            "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7170                            opc->opc3);
7171                 return;
7172             }
7173         }
7174     }
7175     fix_opcode_tables(cpu->opcodes);
7176     fflush(stdout);
7177     fflush(stderr);
7178 }
7179 
7180 void destroy_ppc_opcodes(PowerPCCPU *cpu)
7181 {
7182     opc_handler_t **table, **table_2;
7183     int i, j, k;
7184 
7185     for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
7186         if (cpu->opcodes[i] == &invalid_handler) {
7187             continue;
7188         }
7189         if (is_indirect_opcode(cpu->opcodes[i])) {
7190             table = ind_table(cpu->opcodes[i]);
7191             for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
7192                 if (table[j] == &invalid_handler) {
7193                     continue;
7194                 }
7195                 if (is_indirect_opcode(table[j])) {
7196                     table_2 = ind_table(table[j]);
7197                     for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
7198                         if (table_2[k] != &invalid_handler &&
7199                             is_indirect_opcode(table_2[k])) {
7200                             g_free((opc_handler_t *)((uintptr_t)table_2[k] &
7201                                                      ~PPC_INDIRECT));
7202                         }
7203                     }
7204                     g_free((opc_handler_t *)((uintptr_t)table[j] &
7205                                              ~PPC_INDIRECT));
7206                 }
7207             }
7208             g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
7209                 ~PPC_INDIRECT));
7210         }
7211     }
7212 }
7213 
7214 int ppc_fixup_cpu(PowerPCCPU *cpu)
7215 {
7216     CPUPPCState *env = &cpu->env;
7217 
7218     /*
7219      * TCG doesn't (yet) emulate some groups of instructions that are
7220      * implemented on some otherwise supported CPUs (e.g. VSX and
7221      * decimal floating point instructions on POWER7).  We remove
7222      * unsupported instruction groups from the cpu state's instruction
7223      * masks and hope the guest can cope.  For at least the pseries
7224      * machine, the unavailability of these instructions can be
7225      * advertised to the guest via the device tree.
7226      */
7227     if ((env->insns_flags & ~PPC_TCG_INSNS)
7228         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7229         warn_report("Disabling some instructions which are not "
7230                     "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7231                     env->insns_flags & ~PPC_TCG_INSNS,
7232                     env->insns_flags2 & ~PPC_TCG_INSNS2);
7233     }
7234     env->insns_flags &= PPC_TCG_INSNS;
7235     env->insns_flags2 &= PPC_TCG_INSNS2;
7236     return 0;
7237 }
7238 
/*
 * Decode and translate one instruction via the legacy (pre-decodetree)
 * opcode tables: walk up to three levels of indirect tables keyed on
 * opc1/opc2/opc3/opc4, reject invalid opcodes and reserved-bit
 * violations, then call the handler to emit TCG code.
 * Returns false if the opcode is invalid or has reserved bits set
 * (the caller generates the illegal-instruction exception).
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Walk the (up to) three levels of indirect tables. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /*
     * SPE instructions with Rc set use an alternate reserved-bit mask
     * (inval2); everything else uses inval1.
     */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject instruction forms with reserved bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
7294 
7295 static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
7296 {
7297     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7298     CPUPPCState *env = cs->env_ptr;
7299     uint32_t hflags = ctx->base.tb->flags;
7300 
7301     ctx->spr_cb = env->spr_cb;
7302     ctx->pr = (hflags >> HFLAGS_PR) & 1;
7303     ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
7304     ctx->dr = (hflags >> HFLAGS_DR) & 1;
7305     ctx->hv = (hflags >> HFLAGS_HV) & 1;
7306     ctx->insns_flags = env->insns_flags;
7307     ctx->insns_flags2 = env->insns_flags2;
7308     ctx->access_type = -1;
7309     ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
7310     ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
7311     ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
7312     ctx->flags = env->flags;
7313 #if defined(TARGET_PPC64)
7314     ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
7315     ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
7316 #endif
7317     ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
7318         || env->mmu_model & POWERPC_MMU_64;
7319 
7320     ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
7321     ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
7322     ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
7323     ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
7324     ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
7325     ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
7326     ctx->hr = (hflags >> HFLAGS_HR) & 1;
7327     ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
7328     ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
7329     ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
7330     ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
7331     ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;
7332 
7333     ctx->singlestep_enabled = 0;
7334     if ((hflags >> HFLAGS_SE) & 1) {
7335         ctx->singlestep_enabled |= CPU_SINGLE_STEP;
7336         ctx->base.max_insns = 1;
7337     }
7338     if ((hflags >> HFLAGS_BE) & 1) {
7339         ctx->singlestep_enabled |= CPU_BRANCH_STEP;
7340     }
7341 }
7342 
/* No per-TB setup is needed for PowerPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7346 
/* Record the guest PC of the instruction about to be translated. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7351 
/*
 * Return true if @insn is the prefix word of a prefixed instruction
 * (major opcode 1, introduced in Power ISA v3.1).
 * NOTE(review): REQUIRE_INSNS_FLAGS2 appears to early-return false
 * from this function when the CPU lacks ISA310, so opcode 1 is then
 * treated as a plain (invalid) 32-bit insn -- confirm against the
 * macro definition earlier in this file.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7357 
/*
 * Fetch, decode, and translate one instruction.  Handles both plain
 * 32-bit instructions (decodetree first, legacy tables as fallback)
 * and 64-bit prefixed instructions from ISA v3.1.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* cia = address of the instruction being translated */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* 32-bit insn: decodetree first, then the legacy tables. */
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Prefixed insn: fetch the suffix word and decode 64 bits. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
7401 
7402 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7403 {
7404     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7405     DisasJumpType is_jmp = ctx->base.is_jmp;
7406     target_ulong nip = ctx->base.pc_next;
7407 
7408     if (is_jmp == DISAS_NORETURN) {
7409         /* We have already exited the TB. */
7410         return;
7411     }
7412 
7413     /* Honor single stepping. */
7414     if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
7415         bool rfi_type = false;
7416 
7417         switch (is_jmp) {
7418         case DISAS_TOO_MANY:
7419         case DISAS_EXIT_UPDATE:
7420         case DISAS_CHAIN_UPDATE:
7421             gen_update_nip(ctx, nip);
7422             break;
7423         case DISAS_EXIT:
7424         case DISAS_CHAIN:
7425             /*
7426              * This is a heuristic, to put it kindly. The rfi class of
7427              * instructions are among the few outside branches that change
7428              * NIP without taking an interrupt. Single step trace interrupts
7429              * do not fire on completion of these instructions.
7430              */
7431             rfi_type = true;
7432             break;
7433         default:
7434             g_assert_not_reached();
7435         }
7436 
7437         gen_debug_exception(ctx, rfi_type);
7438         return;
7439     }
7440 
7441     switch (is_jmp) {
7442     case DISAS_TOO_MANY:
7443         if (use_goto_tb(ctx, nip)) {
7444             pmu_count_insns(ctx);
7445             tcg_gen_goto_tb(0);
7446             gen_update_nip(ctx, nip);
7447             tcg_gen_exit_tb(ctx->base.tb, 0);
7448             break;
7449         }
7450         /* fall through */
7451     case DISAS_CHAIN_UPDATE:
7452         gen_update_nip(ctx, nip);
7453         /* fall through */
7454     case DISAS_CHAIN:
7455         /*
7456          * tcg_gen_lookup_and_goto_ptr will exit the TB if
7457          * CF_NO_GOTO_PTR is set. Count insns now.
7458          */
7459         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7460             pmu_count_insns(ctx);
7461         }
7462 
7463         tcg_gen_lookup_and_goto_ptr();
7464         break;
7465 
7466     case DISAS_EXIT_UPDATE:
7467         gen_update_nip(ctx, nip);
7468         /* fall through */
7469     case DISAS_EXIT:
7470         pmu_count_insns(ctx);
7471         tcg_gen_exit_tb(NULL, 0);
7472         break;
7473 
7474     default:
7475         g_assert_not_reached();
7476     }
7477 }
7478 
7479 static void ppc_tr_disas_log(const DisasContextBase *dcbase,
7480                              CPUState *cs, FILE *logfile)
7481 {
7482     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
7483     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
7484 }
7485 
/* Callbacks driven by the generic translator loop (translator_loop). */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7494 
/*
 * Entry point for TB translation: run the generic translator loop with
 * the PowerPC TranslatorOps over guest code starting at @pc.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
7502