xref: /openbmc/qemu/target/ppc/translate.c (revision d5ee641c)
1 /*
2  *  PowerPC emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (C) 2011 Freescale Semiconductor, Inc.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "exec/translator.h"
34 #include "exec/log.h"
35 #include "qemu/atomic128.h"
36 #include "spr_common.h"
37 #include "power8-pmu.h"
38 
39 #include "qemu/qemu-print.h"
40 #include "qapi/error.h"
41 
42 #define HELPER_H "helper.h"
43 #include "exec/helper-info.c.inc"
44 #undef  HELPER_H
45 
46 #define CPU_SINGLE_STEP 0x1
47 #define CPU_BRANCH_STEP 0x2
48 
49 /* Include definitions for instruction classes and implementation flags */
50 /* #define PPC_DEBUG_DISAS */
51 
52 #ifdef PPC_DEBUG_DISAS
53 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
54 #else
55 #  define LOG_DISAS(...) do { } while (0)
56 #endif
57 /*****************************************************************************/
58 /* Code translation helpers                                                  */
59 
60 /* global register indexes */
61 static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
62                           + 10 * 4 + 22 * 5 /* SPE GPRh */
63                           + 8 * 5           /* CRF */];
64 static TCGv cpu_gpr[32];
65 static TCGv cpu_gprh[32];
66 static TCGv_i32 cpu_crf[8];
67 static TCGv cpu_nip;
68 static TCGv cpu_msr;
69 static TCGv cpu_ctr;
70 static TCGv cpu_lr;
71 #if defined(TARGET_PPC64)
72 static TCGv cpu_cfar;
73 #endif
74 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
75 static TCGv cpu_reserve;
76 static TCGv cpu_reserve_length;
77 static TCGv cpu_reserve_val;
78 static TCGv cpu_reserve_val2;
79 static TCGv cpu_fpscr;
80 static TCGv_i32 cpu_access_type;
81 
82 void ppc_translate_init(void)
83 {
84     int i;
85     char *p;
86     size_t cpu_reg_names_size;
87 
88     p = cpu_reg_names;
89     cpu_reg_names_size = sizeof(cpu_reg_names);
90 
91     for (i = 0; i < 8; i++) {
92         snprintf(p, cpu_reg_names_size, "crf%d", i);
93         cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
94                                             offsetof(CPUPPCState, crf[i]), p);
95         p += 5;
96         cpu_reg_names_size -= 5;
97     }
98 
99     for (i = 0; i < 32; i++) {
100         snprintf(p, cpu_reg_names_size, "r%d", i);
101         cpu_gpr[i] = tcg_global_mem_new(cpu_env,
102                                         offsetof(CPUPPCState, gpr[i]), p);
103         p += (i < 10) ? 3 : 4;
104         cpu_reg_names_size -= (i < 10) ? 3 : 4;
105         snprintf(p, cpu_reg_names_size, "r%dH", i);
106         cpu_gprh[i] = tcg_global_mem_new(cpu_env,
107                                          offsetof(CPUPPCState, gprh[i]), p);
108         p += (i < 10) ? 4 : 5;
109         cpu_reg_names_size -= (i < 10) ? 4 : 5;
110     }
111 
112     cpu_nip = tcg_global_mem_new(cpu_env,
113                                  offsetof(CPUPPCState, nip), "nip");
114 
115     cpu_msr = tcg_global_mem_new(cpu_env,
116                                  offsetof(CPUPPCState, msr), "msr");
117 
118     cpu_ctr = tcg_global_mem_new(cpu_env,
119                                  offsetof(CPUPPCState, ctr), "ctr");
120 
121     cpu_lr = tcg_global_mem_new(cpu_env,
122                                 offsetof(CPUPPCState, lr), "lr");
123 
124 #if defined(TARGET_PPC64)
125     cpu_cfar = tcg_global_mem_new(cpu_env,
126                                   offsetof(CPUPPCState, cfar), "cfar");
127 #endif
128 
129     cpu_xer = tcg_global_mem_new(cpu_env,
130                                  offsetof(CPUPPCState, xer), "xer");
131     cpu_so = tcg_global_mem_new(cpu_env,
132                                 offsetof(CPUPPCState, so), "SO");
133     cpu_ov = tcg_global_mem_new(cpu_env,
134                                 offsetof(CPUPPCState, ov), "OV");
135     cpu_ca = tcg_global_mem_new(cpu_env,
136                                 offsetof(CPUPPCState, ca), "CA");
137     cpu_ov32 = tcg_global_mem_new(cpu_env,
138                                   offsetof(CPUPPCState, ov32), "OV32");
139     cpu_ca32 = tcg_global_mem_new(cpu_env,
140                                   offsetof(CPUPPCState, ca32), "CA32");
141 
142     cpu_reserve = tcg_global_mem_new(cpu_env,
143                                      offsetof(CPUPPCState, reserve_addr),
144                                      "reserve_addr");
145     cpu_reserve_length = tcg_global_mem_new(cpu_env,
146                                             offsetof(CPUPPCState,
147                                                      reserve_length),
148                                             "reserve_length");
149     cpu_reserve_val = tcg_global_mem_new(cpu_env,
150                                          offsetof(CPUPPCState, reserve_val),
151                                          "reserve_val");
152     cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
153                                           offsetof(CPUPPCState, reserve_val2),
154                                           "reserve_val2");
155 
156     cpu_fpscr = tcg_global_mem_new(cpu_env,
157                                    offsetof(CPUPPCState, fpscr), "fpscr");
158 
159     cpu_access_type = tcg_global_mem_new_i32(cpu_env,
160                                              offsetof(CPUPPCState, access_type),
161                                              "access_type");
162 }
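
/*
 * Note (editorial, illustrative sketch): the globals created above are TCG
 * "global memory" values, i.e. aliases of fields inside CPUPPCState.
 * Writing one of them from generated code updates the architectural state
 * directly, for example:
 *
 *     tcg_gen_movi_tl(cpu_gpr[3], 0);   // sets env->gpr[3] to 0 at run time
 */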
163 
164 /* internal defines */
165 struct DisasContext {
166     DisasContextBase base;
167     target_ulong cia;  /* current instruction address */
168     uint32_t opcode;
169     /* Execution-mode flags derived from MSR */
170     bool pr, hv, dr, le_mode;
171     bool lazy_tlb_flush;
172     bool need_access_type;
173     int mem_idx;
174     int access_type;
175     /* Translation flags */
176     MemOp default_tcg_memop_mask;
177 #if defined(TARGET_PPC64)
178     bool sf_mode;
179     bool has_cfar;
180 #endif
181     bool fpu_enabled;
182     bool altivec_enabled;
183     bool vsx_enabled;
184     bool spe_enabled;
185     bool tm_enabled;
186     bool gtse;
187     bool hr;
188     bool mmcr0_pmcc0;
189     bool mmcr0_pmcc1;
190     bool mmcr0_pmcjce;
191     bool pmc_other;
192     bool pmu_insn_cnt;
193     ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
194     int singlestep_enabled;
195     uint32_t flags;
196     uint64_t insns_flags;
197     uint64_t insns_flags2;
198 };
199 
200 #define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
201 #define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
202 #define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
203 #define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */
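
/*
 * Note (editorial, illustrative): handlers select one of these when they
 * have to leave the translation loop early.  For example, spr_write_CTRL()
 * below sets ctx->base.is_jmp = DISAS_EXIT_UPDATE so a fresh TB is started
 * after a CTRL write, while the DISAS_CHAIN* variants look up the next TB
 * without bouncing through the main loop.
 */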
204 
205 /* Return true iff byteswap is needed in a scalar memop */
206 static inline bool need_byteswap(const DisasContext *ctx)
207 {
208 #if TARGET_BIG_ENDIAN
209      return ctx->le_mode;
210 #else
211      return !ctx->le_mode;
212 #endif
213 }
214 
215 /* True when active word size < size of target_long.  */
216 #ifdef TARGET_PPC64
217 # define NARROW_MODE(C)  (!(C)->sf_mode)
218 #else
219 # define NARROW_MODE(C)  0
220 #endif
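
/*
 * Note (editorial, illustrative): NARROW_MODE is true when a 64-bit CPU runs
 * with 32-bit addressing (MSR SF clear), so addresses and the PC must be
 * truncated to 32 bits, as gen_update_nip() does below:
 *
 *     if (NARROW_MODE(ctx)) {
 *         nip = (uint32_t)nip;
 *     }
 */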
221 
222 struct opc_handler_t {
223     /* invalid bits for instruction 1 (Rc(opcode) == 0) */
224     uint32_t inval1;
225     /* invalid bits for instruction 2 (Rc(opcode) == 1) */
226     uint32_t inval2;
227     /* instruction type */
228     uint64_t type;
229     /* extended instruction type */
230     uint64_t type2;
231     /* handler */
232     void (*handler)(DisasContext *ctx);
233 };
234 
235 static inline bool gen_serialize(DisasContext *ctx)
236 {
237     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
238         /* Restart with exclusive lock.  */
239         gen_helper_exit_atomic(cpu_env);
240         ctx->base.is_jmp = DISAS_NORETURN;
241         return false;
242     }
243     return true;
244 }
245 
246 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
247 static inline bool gen_serialize_core_lpar(DisasContext *ctx)
248 {
249     if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
250         return gen_serialize(ctx);
251     }
252 
253     return true;
254 }
255 #endif
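
/*
 * Note (editorial, illustrative): gen_serialize() is used by handlers that
 * touch state shared between threads (e.g. spr_write_dpdes() further down).
 * Under parallel TCG (CF_PARALLEL) the instruction is restarted with an
 * exclusive lock via gen_helper_exit_atomic(); with a single TCG thread no
 * extra serialization is needed and the handler proceeds inline.
 */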
256 
257 /* SPR load/store helpers */
258 static inline void gen_load_spr(TCGv t, int reg)
259 {
260     tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
261 }
262 
263 static inline void gen_store_spr(int reg, TCGv t)
264 {
265     tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
266 }
267 
268 static inline void gen_set_access_type(DisasContext *ctx, int access_type)
269 {
270     if (ctx->need_access_type && ctx->access_type != access_type) {
271         tcg_gen_movi_i32(cpu_access_type, access_type);
272         ctx->access_type = access_type;
273     }
274 }
275 
276 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
277 {
278     if (NARROW_MODE(ctx)) {
279         nip = (uint32_t)nip;
280     }
281     tcg_gen_movi_tl(cpu_nip, nip);
282 }
283 
284 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
285 {
286     TCGv_i32 t0, t1;
287 
288     /*
289      * These are all synchronous exceptions; we set the PC back to the
290      * faulting instruction.
291      */
292     gen_update_nip(ctx, ctx->cia);
293     t0 = tcg_constant_i32(excp);
294     t1 = tcg_constant_i32(error);
295     gen_helper_raise_exception_err(cpu_env, t0, t1);
296     ctx->base.is_jmp = DISAS_NORETURN;
297 }
298 
299 static void gen_exception(DisasContext *ctx, uint32_t excp)
300 {
301     TCGv_i32 t0;
302 
303     /*
304      * These are all synchronous exceptions; we set the PC back to the
305      * faulting instruction.
306      */
307     gen_update_nip(ctx, ctx->cia);
308     t0 = tcg_constant_i32(excp);
309     gen_helper_raise_exception(cpu_env, t0);
310     ctx->base.is_jmp = DISAS_NORETURN;
311 }
312 
313 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
314                               target_ulong nip)
315 {
316     TCGv_i32 t0;
317 
318     gen_update_nip(ctx, nip);
319     t0 = tcg_constant_i32(excp);
320     gen_helper_raise_exception(cpu_env, t0);
321     ctx->base.is_jmp = DISAS_NORETURN;
322 }
323 
324 #if !defined(CONFIG_USER_ONLY)
325 static void gen_ppc_maybe_interrupt(DisasContext *ctx)
326 {
327     translator_io_start(&ctx->base);
328     gen_helper_ppc_maybe_interrupt(cpu_env);
329 }
330 #endif
331 
332 /*
333  * Generate the appropriate debug exception and prepare the SPR registers
334  * for it.
335  *
336  * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
337  * POWERPC_EXCP_DEBUG (on BookE).
338  */
339 static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
340 {
341 #if !defined(CONFIG_USER_ONLY)
342     if (ctx->flags & POWERPC_FLAG_DE) {
343         target_ulong dbsr = 0;
344         if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
345             dbsr = DBCR0_ICMP;
346         } else {
347             /* Must have been a branch */
348             dbsr = DBCR0_BRT;
349         }
350         TCGv t0 = tcg_temp_new();
351         gen_load_spr(t0, SPR_BOOKE_DBSR);
352         tcg_gen_ori_tl(t0, t0, dbsr);
353         gen_store_spr(SPR_BOOKE_DBSR, t0);
354         gen_helper_raise_exception(cpu_env,
355                                    tcg_constant_i32(POWERPC_EXCP_DEBUG));
356         ctx->base.is_jmp = DISAS_NORETURN;
357     } else {
358         if (!rfi_type) { /* BookS does not single-step rfi-type instructions */
359             TCGv t0 = tcg_temp_new();
360             tcg_gen_movi_tl(t0, ctx->cia);
361             gen_helper_book3s_trace(cpu_env, t0);
362             ctx->base.is_jmp = DISAS_NORETURN;
363         }
364     }
365 #endif
366 }
367 
368 static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
369 {
370     /* Will be converted to program check if needed */
371     gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
372 }
373 
374 static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
375 {
376     gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
377 }
378 
379 static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
380 {
381     /* Will be converted to program check if needed */
382     gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
383 }
384 
385 /*****************************************************************************/
386 /* SPR READ/WRITE CALLBACKS */
387 
388 void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
389 {
390 #if 0
391     sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
392     printf("ERROR: trying to access SPR %d!\n", sprn);
393 #endif
394 }
395 
396 /* #define PPC_DUMP_SPR_ACCESSES */
397 
398 /*
399  * Generic callbacks:
400  * do nothing but store/retrieve the SPR value
401  */
402 static void spr_load_dump_spr(int sprn)
403 {
404 #ifdef PPC_DUMP_SPR_ACCESSES
405     TCGv_i32 t0 = tcg_constant_i32(sprn);
406     gen_helper_load_dump_spr(cpu_env, t0);
407 #endif
408 }
409 
410 void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
411 {
412     gen_load_spr(cpu_gpr[gprn], sprn);
413     spr_load_dump_spr(sprn);
414 }
415 
416 static void spr_store_dump_spr(int sprn)
417 {
418 #ifdef PPC_DUMP_SPR_ACCESSES
419     TCGv_i32 t0 = tcg_constant_i32(sprn);
420     gen_helper_store_dump_spr(cpu_env, t0);
421 #endif
422 }
423 
424 void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
425 {
426     gen_store_spr(sprn, cpu_gpr[gprn]);
427     spr_store_dump_spr(sprn);
428 }
429 
430 void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
431 {
432 #ifdef TARGET_PPC64
433     TCGv t0 = tcg_temp_new();
434     tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
435     gen_store_spr(sprn, t0);
436     spr_store_dump_spr(sprn);
437 #else
438     spr_write_generic(ctx, sprn, gprn);
439 #endif
440 }
441 
442 void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
443 {
444     if (!(ctx->flags & POWERPC_FLAG_SMT)) {
445         spr_write_generic(ctx, sprn, gprn);
446         return;
447     }
448 
449     if (!gen_serialize(ctx)) {
450         return;
451     }
452 
453     gen_helper_spr_core_write_generic(cpu_env, tcg_constant_i32(sprn),
454                                       cpu_gpr[gprn]);
455     spr_store_dump_spr(sprn);
456 }
457 
458 static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
459 {
460     /* This does not implement >1 thread */
461     TCGv t0 = tcg_temp_new();
462     TCGv t1 = tcg_temp_new();
463     tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
464     tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
465     tcg_gen_or_tl(t1, t1, t0);
466     gen_store_spr(sprn, t1);
467 }
468 
469 void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
470 {
471     if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
472         /* CTRL behaves as 1-thread in LPAR-per-thread mode */
473         spr_write_CTRL_ST(ctx, sprn, gprn);
474         goto out;
475     }
476 
477     if (!gen_serialize(ctx)) {
478         return;
479     }
480 
481     gen_helper_spr_write_CTRL(cpu_env, tcg_constant_i32(sprn),
482                               cpu_gpr[gprn]);
483 out:
484     spr_store_dump_spr(sprn);
485 
486     /*
487      * SPR_CTRL writes must force a new translation block,
488      * allowing the PMU to calculate the run latch events with
489      * more accuracy.
490      */
491     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
492 }
493 
494 #if !defined(CONFIG_USER_ONLY)
495 void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
496 {
497     TCGv t0 = tcg_temp_new();
498     TCGv t1 = tcg_temp_new();
499     gen_load_spr(t0, sprn);
500     tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
501     tcg_gen_and_tl(t0, t0, t1);
502     gen_store_spr(sprn, t0);
503 }
504 
505 void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
506 {
507 }
508 
509 #endif
510 
511 /* SPR common to all PowerPC */
512 /* XER */
513 void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
514 {
515     TCGv dst = cpu_gpr[gprn];
516     TCGv t0 = tcg_temp_new();
517     TCGv t1 = tcg_temp_new();
518     TCGv t2 = tcg_temp_new();
519     tcg_gen_mov_tl(dst, cpu_xer);
520     tcg_gen_shli_tl(t0, cpu_so, XER_SO);
521     tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
522     tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
523     tcg_gen_or_tl(t0, t0, t1);
524     tcg_gen_or_tl(dst, dst, t2);
525     tcg_gen_or_tl(dst, dst, t0);
526     if (is_isa300(ctx)) {
527         tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
528         tcg_gen_or_tl(dst, dst, t0);
529         tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
530         tcg_gen_or_tl(dst, dst, t0);
531     }
532 }
533 
534 void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
535 {
536     TCGv src = cpu_gpr[gprn];
537     /* Write all flags; spr_read_xer() checks for isa300 when reading back */
538     tcg_gen_andi_tl(cpu_xer, src,
539                     ~((1u << XER_SO) |
540                       (1u << XER_OV) | (1u << XER_OV32) |
541                       (1u << XER_CA) | (1u << XER_CA32)));
542     tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
543     tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
544     tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
545     tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
546     tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
547 }
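
/*
 * Note (editorial, illustrative): XER is kept "split": SO, OV, CA (and, on
 * ISA v3.00, OV32 and CA32) live in their own TCG globals while cpu_xer
 * holds the remaining bits.  spr_read_xer() reassembles the architected
 * value by shifting each flag into its XER_* position and OR-ing it in;
 * spr_write_xer() performs the inverse split, which lets arithmetic helpers
 * update e.g. cpu_ca without a read-modify-write of the whole XER.
 */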
548 
549 /* LR */
550 void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
551 {
552     tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
553 }
554 
555 void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
556 {
557     tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
558 }
559 
560 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
561 /* Debug facilities */
562 /* CFAR */
563 void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
564 {
565     tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
566 }
567 
568 void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
569 {
570     tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
571 }
572 
573 /* Breakpoint */
574 void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
575 {
576     translator_io_start(&ctx->base);
577     gen_helper_store_ciabr(cpu_env, cpu_gpr[gprn]);
578 }
579 
580 /* Watchpoint */
581 void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn)
582 {
583     translator_io_start(&ctx->base);
584     gen_helper_store_dawr0(cpu_env, cpu_gpr[gprn]);
585 }
586 
587 void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
588 {
589     translator_io_start(&ctx->base);
590     gen_helper_store_dawrx0(cpu_env, cpu_gpr[gprn]);
591 }
592 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
593 
594 /* CTR */
595 void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
596 {
597     tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
598 }
599 
600 void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
601 {
602     tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
603 }
604 
605 /* User read access to SPR */
606 /* USPRx */
607 /* UMMCRx */
608 /* UPMCx */
609 /* USIA */
610 /* UDECR */
611 void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
612 {
613     gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
614 }
615 
616 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
617 void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
618 {
619     gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
620 }
621 #endif
622 
623 /* SPR common to all non-embedded PowerPC */
624 /* DECR */
625 #if !defined(CONFIG_USER_ONLY)
626 void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
627 {
628     translator_io_start(&ctx->base);
629     gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
630 }
631 
632 void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
633 {
634     translator_io_start(&ctx->base);
635     gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
636 }
637 #endif
638 
639 /* SPR common to all non-embedded PowerPC, except 601 */
640 /* Time base */
641 void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
642 {
643     translator_io_start(&ctx->base);
644     gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
645 }
646 
647 void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
648 {
649     translator_io_start(&ctx->base);
650     gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
651 }
652 
653 void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
654 {
655     gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
656 }
657 
658 void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
659 {
660     gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
661 }
662 
663 #if !defined(CONFIG_USER_ONLY)
664 void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
665 {
666     translator_io_start(&ctx->base);
667     gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
668 }
669 
670 void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
671 {
672     translator_io_start(&ctx->base);
673     gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
674 }
675 
676 void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
677 {
678     gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
679 }
680 
681 void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
682 {
683     gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
684 }
685 
686 #if defined(TARGET_PPC64)
687 void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
688 {
689     translator_io_start(&ctx->base);
690     gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
691 }
692 
693 void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
694 {
695     translator_io_start(&ctx->base);
696     gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
697 }
698 
699 /* HDECR */
700 void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
701 {
702     translator_io_start(&ctx->base);
703     gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
704 }
705 
706 void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
707 {
708     translator_io_start(&ctx->base);
709     gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
710 }
711 
712 void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
713 {
714     translator_io_start(&ctx->base);
715     gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
716 }
717 
718 void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
719 {
720     translator_io_start(&ctx->base);
721     gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
722 }
723 
724 void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
725 {
726     translator_io_start(&ctx->base);
727     gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
728 }
729 
730 #endif
731 #endif
732 
733 #if !defined(CONFIG_USER_ONLY)
734 /* IBAT0U...IBAT7U */
735 /* IBAT0L...IBAT7L */
736 void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
737 {
738     tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
739                   offsetof(CPUPPCState,
740                            IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
741 }
742 
743 void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
744 {
745     tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
746                   offsetof(CPUPPCState,
747                            IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
748 }
749 
750 void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
751 {
752     TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
753     gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
754 }
755 
756 void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
757 {
758     TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
759     gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
760 }
761 
762 void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
763 {
764     TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
765     gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
766 }
767 
768 void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
769 {
770     TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
771     gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
772 }
773 
774 /* DBAT0U...DBAT7U */
775 /* DBAT0L...DBAT7L */
776 void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
777 {
778     tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
779                   offsetof(CPUPPCState,
780                            DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
781 }
782 
783 void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
784 {
785     tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
786                   offsetof(CPUPPCState,
787                            DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
788 }
789 
790 void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
791 {
792     TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
793     gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
794 }
795 
796 void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
797 {
798     TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
799     gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
800 }
801 
802 void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
803 {
804     TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
805     gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
806 }
807 
808 void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
809 {
810     TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
811     gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
812 }
813 
814 /* SDR1 */
815 void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
816 {
817     gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
818 }
819 
820 #if defined(TARGET_PPC64)
821 /* 64-bit PowerPC specific SPRs */
822 /* PIDR */
823 void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
824 {
825     gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
826 }
827 
828 void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
829 {
830     gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
831 }
832 
833 void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
834 {
835     tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
836 }
837 
838 void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
839 {
840     TCGv t0 = tcg_temp_new();
841     tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
842     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
843 }
844 void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
845 {
846     gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
847 }
848 
849 void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
850 {
851     gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
852 }
853 
854 /* DPDES */
855 void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
856 {
857     if (!gen_serialize_core_lpar(ctx)) {
858         return;
859     }
860 
861     gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
862 }
863 
864 void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
865 {
866     if (!gen_serialize_core_lpar(ctx)) {
867         return;
868     }
869 
870     gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
871 }
872 #endif
873 #endif
874 
875 /* PowerPC 40x specific registers */
876 #if !defined(CONFIG_USER_ONLY)
877 void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
878 {
879     translator_io_start(&ctx->base);
880     gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
881 }
882 
883 void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
884 {
885     translator_io_start(&ctx->base);
886     gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
887 }
888 
889 void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
890 {
891     translator_io_start(&ctx->base);
892     gen_store_spr(sprn, cpu_gpr[gprn]);
893     gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
894     /* We must stop translation as we may have rebooted */
895     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
896 }
897 
898 void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
899 {
900     translator_io_start(&ctx->base);
901     gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
902 }
903 
904 void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
905 {
906     translator_io_start(&ctx->base);
907     gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
908 }
909 
910 void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
911 {
912     translator_io_start(&ctx->base);
913     gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
914 }
915 
916 void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
917 {
918     TCGv t0 = tcg_temp_new();
919     tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
920     gen_helper_store_40x_pid(cpu_env, t0);
921 }
922 
923 void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
924 {
925     translator_io_start(&ctx->base);
926     gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
927 }
928 
929 void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
930 {
931     translator_io_start(&ctx->base);
932     gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
933 }
934 #endif
935 
936 /* PIR */
937 #if !defined(CONFIG_USER_ONLY)
938 void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
939 {
940     TCGv t0 = tcg_temp_new();
941     tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
942     gen_store_spr(SPR_PIR, t0);
943 }
944 #endif
945 
946 /* SPE specific registers */
947 void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
948 {
949     TCGv_i32 t0 = tcg_temp_new_i32();
950     tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
951     tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
952 }
953 
954 void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
955 {
956     TCGv_i32 t0 = tcg_temp_new_i32();
957     tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
958     tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
959 }
960 
961 #if !defined(CONFIG_USER_ONLY)
962 /* Callback used to write the exception vector base */
963 void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
964 {
965     TCGv t0 = tcg_temp_new();
966     tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
967     tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
968     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
969     gen_store_spr(sprn, t0);
970 }
971 
972 void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
973 {
974     int sprn_offs;
975 
976     if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
977         sprn_offs = sprn - SPR_BOOKE_IVOR0;
978     } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
979         sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
980     } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
981         sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
982     } else {
983         qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
984                       " vector 0x%03x\n", sprn);
985         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
986         return;
987     }
988 
989     TCGv t0 = tcg_temp_new();
990     tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
991     tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
992     tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
993     gen_store_spr(sprn, t0);
994 }
995 #endif
996 
997 #ifdef TARGET_PPC64
998 #ifndef CONFIG_USER_ONLY
999 void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
1000 {
1001     TCGv t0 = tcg_temp_new();
1002     TCGv t1 = tcg_temp_new();
1003     TCGv t2 = tcg_temp_new();
1004 
1005     /*
1006      * Note, the HV=1 PR=0 case is handled earlier by simply using
1007      * spr_write_generic for HV mode in the SPR table
1008      */
1009 
1010     /* Build insertion mask into t1 based on context */
1011     if (ctx->pr) {
1012         gen_load_spr(t1, SPR_UAMOR);
1013     } else {
1014         gen_load_spr(t1, SPR_AMOR);
1015     }
1016 
1017     /* Mask new bits into t2 */
1018     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
1019 
1020     /* Load AMR and clear new bits in t0 */
1021     gen_load_spr(t0, SPR_AMR);
1022     tcg_gen_andc_tl(t0, t0, t1);
1023 
1024     /* OR in the new bits and write them out */
1025     tcg_gen_or_tl(t0, t0, t2);
1026     gen_store_spr(SPR_AMR, t0);
1027     spr_store_dump_spr(SPR_AMR);
1028 }
1029 
1030 void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
1031 {
1032     TCGv t0 = tcg_temp_new();
1033     TCGv t1 = tcg_temp_new();
1034     TCGv t2 = tcg_temp_new();
1035 
1036     /*
1037      * Note, the HV=1 case is handled earlier by simply using
1038      * spr_write_generic for HV mode in the SPR table
1039      */
1040 
1041     /* Build insertion mask into t1 based on context */
1042     gen_load_spr(t1, SPR_AMOR);
1043 
1044     /* Mask new bits into t2 */
1045     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
1046 
1047     /* Load UAMOR and clear new bits in t0 */
1048     gen_load_spr(t0, SPR_UAMOR);
1049     tcg_gen_andc_tl(t0, t0, t1);
1050 
1051     /* OR in the new bits and write them out */
1052     tcg_gen_or_tl(t0, t0, t2);
1053     gen_store_spr(SPR_UAMOR, t0);
1054     spr_store_dump_spr(SPR_UAMOR);
1055 }
1056 
1057 void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
1058 {
1059     TCGv t0 = tcg_temp_new();
1060     TCGv t1 = tcg_temp_new();
1061     TCGv t2 = tcg_temp_new();
1062 
1063     /*
1064      * Note, the HV=1 case is handled earlier by simply using
1065      * spr_write_generic for HV mode in the SPR table
1066      */
1067 
1068     /* Build insertion mask into t1 based on context */
1069     gen_load_spr(t1, SPR_AMOR);
1070 
1071     /* Mask new bits into t2 */
1072     tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
1073 
1074     /* Load IAMR and clear new bits in t0 */
1075     gen_load_spr(t0, SPR_IAMR);
1076     tcg_gen_andc_tl(t0, t0, t1);
1077 
1078     /* OR in the new bits and write them out */
1079     tcg_gen_or_tl(t0, t0, t2);
1080     gen_store_spr(SPR_IAMR, t0);
1081     spr_store_dump_spr(SPR_IAMR);
1082 }
1083 #endif
1084 #endif
1085 
1086 #ifndef CONFIG_USER_ONLY
1087 void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
1088 {
1089     gen_helper_fixup_thrm(cpu_env);
1090     gen_load_spr(cpu_gpr[gprn], sprn);
1091     spr_load_dump_spr(sprn);
1092 }
1093 #endif /* !CONFIG_USER_ONLY */
1094 
1095 #if !defined(CONFIG_USER_ONLY)
1096 void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
1097 {
1098     TCGv t0 = tcg_temp_new();
1099 
1100     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
1101     gen_store_spr(sprn, t0);
1102 }
1103 
1104 void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
1105 {
1106     TCGv t0 = tcg_temp_new();
1107 
1108     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
1109     gen_store_spr(sprn, t0);
1110 }
1111 
1112 void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
1113 {
1114     TCGv t0 = tcg_temp_new();
1115 
1116     tcg_gen_andi_tl(t0, cpu_gpr[gprn],
1117                     ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
1118     gen_store_spr(sprn, t0);
1119 }
1120 
1121 void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
1122 {
1123     gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
1124 }
1125 
1126 void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
1127 {
1128     TCGv_i32 t0 = tcg_constant_i32(sprn);
1129     gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
1130 }
1131 
1132 void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
1133 {
1134     gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
1135 }
1136 
1137 void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
1138 {
1139     gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
1140 }
1141 
1142 #endif
1143 
1144 #if !defined(CONFIG_USER_ONLY)
1145 void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
1146 {
1147     TCGv val = tcg_temp_new();
1148     tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
1149     gen_store_spr(SPR_BOOKE_MAS3, val);
1150     tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
1151     gen_store_spr(SPR_BOOKE_MAS7, val);
1152 }
1153 
1154 void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
1155 {
1156     TCGv mas7 = tcg_temp_new();
1157     TCGv mas3 = tcg_temp_new();
1158     gen_load_spr(mas7, SPR_BOOKE_MAS7);
1159     tcg_gen_shli_tl(mas7, mas7, 32);
1160     gen_load_spr(mas3, SPR_BOOKE_MAS3);
1161     tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
1162 }
1163 
1164 #endif
1165 
1166 #ifdef TARGET_PPC64
1167 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1168                                     int bit, int sprn, int cause)
1169 {
1170     TCGv_i32 t1 = tcg_constant_i32(bit);
1171     TCGv_i32 t2 = tcg_constant_i32(sprn);
1172     TCGv_i32 t3 = tcg_constant_i32(cause);
1173 
1174     gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
1175 }
1176 
1177 static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
1178                                    int bit, int sprn, int cause)
1179 {
1180     TCGv_i32 t1 = tcg_constant_i32(bit);
1181     TCGv_i32 t2 = tcg_constant_i32(sprn);
1182     TCGv_i32 t3 = tcg_constant_i32(cause);
1183 
1184     gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
1185 }
1186 
1187 void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
1188 {
1189     TCGv spr_up = tcg_temp_new();
1190     TCGv spr = tcg_temp_new();
1191 
1192     gen_load_spr(spr, sprn - 1);
1193     tcg_gen_shri_tl(spr_up, spr, 32);
1194     tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
1195 }
1196 
1197 void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
1198 {
1199     TCGv spr = tcg_temp_new();
1200 
1201     gen_load_spr(spr, sprn - 1);
1202     tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
1203     gen_store_spr(sprn - 1, spr);
1204 }
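
/*
 * Note (editorial, illustrative): these helpers implement SPRs that are
 * architecturally the upper 32 bits of the *previous* SPR number, hence the
 * "sprn - 1" accesses.  For example, TEXASRU (used by the TM callbacks
 * below) reads and writes bits 0:31 of TEXASR.
 */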
1205 
1206 #if !defined(CONFIG_USER_ONLY)
1207 void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
1208 {
1209     TCGv hmer = tcg_temp_new();
1210 
1211     gen_load_spr(hmer, sprn);
1212     tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
1213     gen_store_spr(sprn, hmer);
1214     spr_store_dump_spr(sprn);
1215 }
1216 
1217 void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
1218 {
1219     gen_helper_load_tfmr(cpu_gpr[gprn], cpu_env);
1220 }
1221 
1222 void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
1223 {
1224     gen_helper_store_tfmr(cpu_env, cpu_gpr[gprn]);
1225 }
1226 
1227 void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
1228 {
1229     translator_io_start(&ctx->base);
1230     gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
1231 }
1232 #endif /* !defined(CONFIG_USER_ONLY) */
1233 
1234 void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
1235 {
1236     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
1237     spr_read_generic(ctx, gprn, sprn);
1238 }
1239 
1240 void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
1241 {
1242     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
1243     spr_write_generic(ctx, sprn, gprn);
1244 }
1245 
1246 void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
1247 {
1248     gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
1249     spr_read_generic(ctx, gprn, sprn);
1250 }
1251 
1252 void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
1253 {
1254     gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
1255     spr_write_generic(ctx, sprn, gprn);
1256 }
1257 
1258 void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
1259 {
1260     gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
1261     spr_read_prev_upper32(ctx, gprn, sprn);
1262 }
1263 
1264 void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
1265 {
1266     gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
1267     spr_write_prev_upper32(ctx, sprn, gprn);
1268 }
1269 
1270 void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
1271 {
1272     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
1273     spr_read_generic(ctx, gprn, sprn);
1274 }
1275 
1276 void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
1277 {
1278     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
1279     spr_write_generic(ctx, sprn, gprn);
1280 }
1281 
1282 void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
1283 {
1284     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
1285     spr_read_prev_upper32(ctx, gprn, sprn);
1286 }
1287 
1288 void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
1289 {
1290     gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
1291     spr_write_prev_upper32(ctx, sprn, gprn);
1292 }
1293 
1294 void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
1295 {
1296     TCGv t0 = tcg_temp_new();
1297 
1298     /*
1299      * Access to the (H)DEXCR in problem state is done using separate
1300      * SPR indexes, 16 below the indexes that give full access to the
1301      * (H)DEXCR in privileged state. Problem state can only read
1302      * bits 32:63; bits 0:31 return 0.
1303      *
1304      * See sections 9.3.1-9.3.2 of PowerISA v3.1B
1305      */
1306 
1307     gen_load_spr(t0, sprn + 16);
1308     tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
1309 }
1310 #endif
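
/*
 * Note (editorial, illustrative): the "+ 16" above is the distance between
 * the problem-state alias and the privileged SPR number (e.g., assuming the
 * usual numbering, UDEXCR at 812 aliases DEXCR at 828).  Only the low 32
 * bits are exposed, matching the "bits 32:63 readable" rule quoted above.
 */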
1311 
1312 #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
1313 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
1314 
1315 #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
1316 GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
1317 
1318 #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
1319 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
1320 
1321 #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
1322 GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
1323 
1324 #define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
1325 GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)
1326 
1327 #define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
1328 GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1329 
1330 typedef struct opcode_t {
1331     unsigned char opc1, opc2, opc3, opc4;
1332 #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
1333     unsigned char pad[4];
1334 #endif
1335     opc_handler_t handler;
1336     const char *oname;
1337 } opcode_t;
1338 
1339 static void gen_priv_opc(DisasContext *ctx)
1340 {
1341     gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
1342 }
1343 
1344 /* Helpers for privilege checks */
1345 #define GEN_PRIV(CTX)              \
1346     do {                           \
1347         gen_priv_opc(CTX); return; \
1348     } while (0)
1349 
1350 #if defined(CONFIG_USER_ONLY)
1351 #define CHK_HV(CTX) GEN_PRIV(CTX)
1352 #define CHK_SV(CTX) GEN_PRIV(CTX)
1353 #define CHK_HVRM(CTX) GEN_PRIV(CTX)
1354 #else
1355 #define CHK_HV(CTX)                         \
1356     do {                                    \
1357         if (unlikely(ctx->pr || !ctx->hv)) {\
1358             GEN_PRIV(CTX);                  \
1359         }                                   \
1360     } while (0)
1361 #define CHK_SV(CTX)              \
1362     do {                         \
1363         if (unlikely(ctx->pr)) { \
1364             GEN_PRIV(CTX);       \
1365         }                        \
1366     } while (0)
1367 #define CHK_HVRM(CTX)                                   \
1368     do {                                                \
1369         if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
1370             GEN_PRIV(CTX);                              \
1371         }                                               \
1372     } while (0)
1373 #endif
1374 
1375 #define CHK_NONE(CTX)
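
/*
 * Note (editorial, illustrative): a privileged instruction handler simply
 * opens with one of these checks; a supervisor-only handler would look like:
 *
 *     static void gen_foo(DisasContext *ctx)   // hypothetical handler
 *     {
 *         CHK_SV(ctx);   // raises PRIV_OPC and returns if in problem state
 *         ...
 *     }
 */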
1376 
1377 /*****************************************************************************/
1378 /* PowerPC instructions table                                                */
1379 
1380 #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
1381 {                                                                             \
1382     .opc1 = op1,                                                              \
1383     .opc2 = op2,                                                              \
1384     .opc3 = op3,                                                              \
1385     .opc4 = 0xff,                                                             \
1386     .handler = {                                                              \
1387         .inval1  = invl,                                                      \
1388         .type = _typ,                                                         \
1389         .type2 = _typ2,                                                       \
1390         .handler = &gen_##name,                                               \
1391     },                                                                        \
1392     .oname = stringify(name),                                                 \
1393 }
1394 #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
1395 {                                                                             \
1396     .opc1 = op1,                                                              \
1397     .opc2 = op2,                                                              \
1398     .opc3 = op3,                                                              \
1399     .opc4 = 0xff,                                                             \
1400     .handler = {                                                              \
1401         .inval1  = invl1,                                                     \
1402         .inval2  = invl2,                                                     \
1403         .type = _typ,                                                         \
1404         .type2 = _typ2,                                                       \
1405         .handler = &gen_##name,                                               \
1406     },                                                                        \
1407     .oname = stringify(name),                                                 \
1408 }
1409 #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
1410 {                                                                             \
1411     .opc1 = op1,                                                              \
1412     .opc2 = op2,                                                              \
1413     .opc3 = op3,                                                              \
1414     .opc4 = 0xff,                                                             \
1415     .handler = {                                                              \
1416         .inval1  = invl,                                                      \
1417         .type = _typ,                                                         \
1418         .type2 = _typ2,                                                       \
1419         .handler = &gen_##name,                                               \
1420     },                                                                        \
1421     .oname = onam,                                                            \
1422 }
1423 #define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
1424 {                                                                             \
1425     .opc1 = op1,                                                              \
1426     .opc2 = op2,                                                              \
1427     .opc3 = op3,                                                              \
1428     .opc4 = op4,                                                              \
1429     .handler = {                                                              \
1430         .inval1  = invl,                                                      \
1431         .type = _typ,                                                         \
1432         .type2 = _typ2,                                                       \
1433         .handler = &gen_##name,                                               \
1434     },                                                                        \
1435     .oname = stringify(name),                                                 \
1436 }
1437 #define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
1438 {                                                                             \
1439     .opc1 = op1,                                                              \
1440     .opc2 = op2,                                                              \
1441     .opc3 = op3,                                                              \
1442     .opc4 = op4,                                                              \
1443     .handler = {                                                              \
1444         .inval1  = invl,                                                      \
1445         .type = _typ,                                                         \
1446         .type2 = _typ2,                                                       \
1447         .handler = &gen_##name,                                               \
1448     },                                                                        \
1449     .oname = onam,                                                            \
1450 }
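
/*
 * Note (editorial, illustrative): each GEN_OPCODE* use produces one opcode_t
 * table entry.  For instance, GEN_HANDLER(foo, op1, op2, op3, invl, typ)
 * (with a hypothetical gen_foo handler) expands to roughly:
 *
 *     { .opc1 = op1, .opc2 = op2, .opc3 = op3, .opc4 = 0xff,
 *       .handler = { .inval1 = invl, .type = typ, .type2 = PPC_NONE,
 *                    .handler = &gen_foo },
 *       .oname = "foo" }
 */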
1451 
1452 /* Invalid instruction */
1453 static void gen_invalid(DisasContext *ctx)
1454 {
1455     gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
1456 }
1457 
1458 static opc_handler_t invalid_handler = {
1459     .inval1  = 0xFFFFFFFF,
1460     .inval2  = 0xFFFFFFFF,
1461     .type    = PPC_NONE,
1462     .type2   = PPC_NONE,
1463     .handler = gen_invalid,
1464 };
1465 
1466 /***                           Integer comparison                          ***/
1467 
1468 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
1469 {
1470     TCGv t0 = tcg_temp_new();
1471     TCGv t1 = tcg_temp_new();
1472     TCGv_i32 t = tcg_temp_new_i32();
1473 
1474     tcg_gen_movi_tl(t0, CRF_EQ);
1475     tcg_gen_movi_tl(t1, CRF_LT);
1476     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
1477                        t0, arg0, arg1, t1, t0);
1478     tcg_gen_movi_tl(t1, CRF_GT);
1479     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
1480                        t0, arg0, arg1, t1, t0);
1481 
1482     tcg_gen_trunc_tl_i32(t, t0);
1483     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
1484     tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
1485 }
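
/*
 * Note (editorial, illustrative): gen_op_cmp() builds a complete 4-bit CR
 * field: it starts from CRF_EQ, conditionally overwrites it with CRF_LT or
 * CRF_GT via movcond (signed or unsigned comparison depending on 's'), and
 * finally ORs in the current SO bit, so cpu_crf[crf] ends up holding one of
 * LT/GT/EQ combined with SO.
 */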
1486 
1487 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
1488 {
1489     TCGv t0 = tcg_constant_tl(arg1);
1490     gen_op_cmp(arg0, t0, s, crf);
1491 }
1492 
1493 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
1494 {
1495     TCGv t0, t1;
1496     t0 = tcg_temp_new();
1497     t1 = tcg_temp_new();
1498     if (s) {
1499         tcg_gen_ext32s_tl(t0, arg0);
1500         tcg_gen_ext32s_tl(t1, arg1);
1501     } else {
1502         tcg_gen_ext32u_tl(t0, arg0);
1503         tcg_gen_ext32u_tl(t1, arg1);
1504     }
1505     gen_op_cmp(t0, t1, s, crf);
1506 }
1507 
1508 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
1509 {
1510     TCGv t0 = tcg_constant_tl(arg1);
1511     gen_op_cmp32(arg0, t0, s, crf);
1512 }
1513 
1514 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
1515 {
1516     if (NARROW_MODE(ctx)) {
1517         gen_op_cmpi32(reg, 0, 1, 0);
1518     } else {
1519         gen_op_cmpi(reg, 0, 1, 0);
1520     }
1521 }
1522 
1523 /* cmprb - range comparison: isupper, isalpha, islower */
1524 static void gen_cmprb(DisasContext *ctx)
1525 {
1526     TCGv_i32 src1 = tcg_temp_new_i32();
1527     TCGv_i32 src2 = tcg_temp_new_i32();
1528     TCGv_i32 src2lo = tcg_temp_new_i32();
1529     TCGv_i32 src2hi = tcg_temp_new_i32();
1530     TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];
1531 
1532     tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
1533     tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);
1534 
1535     tcg_gen_andi_i32(src1, src1, 0xFF);
1536     tcg_gen_ext8u_i32(src2lo, src2);
1537     tcg_gen_shri_i32(src2, src2, 8);
1538     tcg_gen_ext8u_i32(src2hi, src2);
1539 
1540     tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
1541     tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
1542     tcg_gen_and_i32(crf, src2lo, src2hi);
1543 
1544     if (ctx->opcode & 0x00200000) {
1545         tcg_gen_shri_i32(src2, src2, 8);
1546         tcg_gen_ext8u_i32(src2lo, src2);
1547         tcg_gen_shri_i32(src2, src2, 8);
1548         tcg_gen_ext8u_i32(src2hi, src2);
1549         tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
1550         tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
1551         tcg_gen_and_i32(src2lo, src2lo, src2hi);
1552         tcg_gen_or_i32(crf, crf, src2lo);
1553     }
1554     tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
1555 }
1556 
1557 #if defined(TARGET_PPC64)
1558 /* cmpeqb */
1559 static void gen_cmpeqb(DisasContext *ctx)
1560 {
1561     gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1562                       cpu_gpr[rB(ctx->opcode)]);
1563 }
1564 #endif
1565 
1566 /* isel (PowerPC 2.03 specification) */
1567 static void gen_isel(DisasContext *ctx)
1568 {
1569     uint32_t bi = rC(ctx->opcode);
1570     uint32_t mask = 0x08 >> (bi & 0x03);
1571     TCGv t0 = tcg_temp_new();
1572     TCGv zr;
1573 
1574     tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
1575     tcg_gen_andi_tl(t0, t0, mask);
1576 
1577     zr = tcg_constant_tl(0);
1578     tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
1579                        rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
1580                        cpu_gpr[rB(ctx->opcode)]);
1581 }
1582 
1583 /* cmpb: PowerPC 2.05 specification */
1584 static void gen_cmpb(DisasContext *ctx)
1585 {
1586     gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
1587                     cpu_gpr[rB(ctx->opcode)]);
1588 }
1589 
1590 /***                           Integer arithmetic                          ***/
1591 
1592 static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
1593                                            TCGv arg1, TCGv arg2, int sub)
1594 {
1595     TCGv t0 = tcg_temp_new();
1596 
1597     tcg_gen_xor_tl(cpu_ov, arg0, arg2);
1598     tcg_gen_xor_tl(t0, arg1, arg2);
1599     if (sub) {
1600         tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
1601     } else {
1602         tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
1603     }
1604     if (NARROW_MODE(ctx)) {
1605         tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
1606         if (is_isa300(ctx)) {
1607             tcg_gen_mov_tl(cpu_ov32, cpu_ov);
1608         }
1609     } else {
1610         if (is_isa300(ctx)) {
1611             tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
1612         }
1613         tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
1614     }
1615     tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
1616 }
1617 
1618 static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
1619                                              TCGv res, TCGv arg0, TCGv arg1,
1620                                              TCGv ca32, int sub)
1621 {
1622     TCGv t0;
1623 
1624     if (!is_isa300(ctx)) {
1625         return;
1626     }
1627 
1628     t0 = tcg_temp_new();
1629     if (sub) {
1630         tcg_gen_eqv_tl(t0, arg0, arg1);
1631     } else {
1632         tcg_gen_xor_tl(t0, arg0, arg1);
1633     }
1634     tcg_gen_xor_tl(t0, t0, res);
1635     tcg_gen_extract_tl(ca32, t0, 32, 1);
1636 }
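
/*
 * Note (editorial, illustrative): for addition the carry out of bit 31 can
 * be recovered without redoing the add, because bit 32 of
 * (arg0 ^ arg1 ^ res) is exactly the carry into bit 32.  For subtraction
 * the same identity holds with eqv (complemented xor), since a - b is
 * computed as a + ~b + 1.
 */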
1637 
1638 /* Common add function */
1639 static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
1640                                     TCGv arg2, TCGv ca, TCGv ca32,
1641                                     bool add_ca, bool compute_ca,
1642                                     bool compute_ov, bool compute_rc0)
1643 {
1644     TCGv t0 = ret;
1645 
1646     if (compute_ca || compute_ov) {
1647         t0 = tcg_temp_new();
1648     }
1649 
1650     if (compute_ca) {
1651         if (NARROW_MODE(ctx)) {
1652             /*
1653              * Caution: a non-obvious corner case of the spec is that
1654              * we must produce the *entire* 64-bit addition, but
1655              * produce the carry into bit 32.
1656              */
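            /*
             * The carry out of the low word is recovered without a second
             * addition: t1 = arg1 ^ arg2 is the sum with all carries
             * suppressed, so t0 ^ t1 has a 1 exactly at every bit that
             * received a carry-in, and bit 32 of it is the carry out of
             * bit 31.
             */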
1657             TCGv t1 = tcg_temp_new();
1658             tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
1659             tcg_gen_add_tl(t0, arg1, arg2);
1660             if (add_ca) {
1661                 tcg_gen_add_tl(t0, t0, ca);
1662             }
1663             tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
1664             tcg_gen_extract_tl(ca, ca, 32, 1);
1665             if (is_isa300(ctx)) {
1666                 tcg_gen_mov_tl(ca32, ca);
1667             }
1668         } else {
1669             TCGv zero = tcg_constant_tl(0);
1670             if (add_ca) {
1671                 tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
1672                 tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
1673             } else {
1674                 tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
1675             }
1676             gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
1677         }
1678     } else {
1679         tcg_gen_add_tl(t0, arg1, arg2);
1680         if (add_ca) {
1681             tcg_gen_add_tl(t0, t0, ca);
1682         }
1683     }
1684 
1685     if (compute_ov) {
1686         gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
1687     }
1688     if (unlikely(compute_rc0)) {
1689         gen_set_Rc0(ctx, t0);
1690     }
1691 
1692     if (t0 != ret) {
1693         tcg_gen_mov_tl(ret, t0);
1694     }
1695 }
1696 /* Add functions with two operands */
1697 #define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
1698 static void glue(gen_, name)(DisasContext *ctx)                               \
1699 {                                                                             \
1700     gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
1701                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
1702                      ca, glue(ca, 32),                                        \
1703                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
1704 }
1705 /* Add functions with one operand and one immediate */
1706 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
1707                                 add_ca, compute_ca, compute_ov)               \
1708 static void glue(gen_, name)(DisasContext *ctx)                               \
1709 {                                                                             \
1710     TCGv t0 = tcg_constant_tl(const_val);                                     \
1711     gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
1712                      cpu_gpr[rA(ctx->opcode)], t0,                            \
1713                      ca, glue(ca, 32),                                        \
1714                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
1715 }
1716 
1717 /* add  add.  addo  addo. */
1718 GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
1719 GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
1720 /* addc  addc.  addco  addco. */
1721 GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
1722 GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
1723 /* adde  adde.  addeo  addeo. */
1724 GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
1725 GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
1726 /* addme  addme.  addmeo  addmeo.  */
1727 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
1728 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
1729 /* addex (ISA v3.0): add extended using OV as the carry */
1730 GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
1731 /* addze  addze.  addzeo  addzeo.*/
1732 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
1733 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
1734 /* addic  addic.*/
1735 static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
1736 {
1737     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
1738     gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
1739                      c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
1740 }
1741 
1742 static void gen_addic(DisasContext *ctx)
1743 {
1744     gen_op_addic(ctx, 0);
1745 }
1746 
1747 static void gen_addic_(DisasContext *ctx)
1748 {
1749     gen_op_addic(ctx, 1);
1750 }
1751 
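/*
 * Word divide.  Divide by zero and, for the signed form, INT_MIN / -1 are
 * undefined on PowerPC: t2 flags those cases, the divisor is replaced so
 * that the host division cannot trap, and t2 becomes OV when OE is set.
 */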
1752 static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
1753                                      TCGv arg2, int sign, int compute_ov)
1754 {
1755     TCGv_i32 t0 = tcg_temp_new_i32();
1756     TCGv_i32 t1 = tcg_temp_new_i32();
1757     TCGv_i32 t2 = tcg_temp_new_i32();
1758     TCGv_i32 t3 = tcg_temp_new_i32();
1759 
1760     tcg_gen_trunc_tl_i32(t0, arg1);
1761     tcg_gen_trunc_tl_i32(t1, arg2);
1762     if (sign) {
1763         tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
1764         tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
1765         tcg_gen_and_i32(t2, t2, t3);
1766         tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
1767         tcg_gen_or_i32(t2, t2, t3);
1768         tcg_gen_movi_i32(t3, 0);
1769         tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
1770         tcg_gen_div_i32(t3, t0, t1);
1771         tcg_gen_extu_i32_tl(ret, t3);
1772     } else {
1773         tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
1774         tcg_gen_movi_i32(t3, 0);
1775         tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
1776         tcg_gen_divu_i32(t3, t0, t1);
1777         tcg_gen_extu_i32_tl(ret, t3);
1778     }
1779     if (compute_ov) {
1780         tcg_gen_extu_i32_tl(cpu_ov, t2);
1781         if (is_isa300(ctx)) {
1782             tcg_gen_extu_i32_tl(cpu_ov32, t2);
1783         }
1784         tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
1785     }
1786 
1787     if (unlikely(Rc(ctx->opcode) != 0)) {
1788         gen_set_Rc0(ctx, ret);
1789     }
1790 }
1791 /* Div functions */
1792 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
1793 static void glue(gen_, name)(DisasContext *ctx)                               \
1794 {                                                                             \
1795     gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
1796                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
1797                      sign, compute_ov);                                       \
1798 }
1799 /* divwu  divwu.  divwuo  divwuo.   */
1800 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
1801 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
1802 /* divw  divw.  divwo  divwo.   */
1803 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
1804 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1805 
1806 /* div[wd]eu[o][.] */
1807 #define GEN_DIVE(name, hlpr, compute_ov)                                      \
1808 static void gen_##name(DisasContext *ctx)                                     \
1809 {                                                                             \
1810     TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
1811     gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
1812                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
1813     if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
1814         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
1815     }                                                                         \
1816 }
1817 
1818 GEN_DIVE(divweu, divweu, 0);
1819 GEN_DIVE(divweuo, divweu, 1);
1820 GEN_DIVE(divwe, divwe, 0);
1821 GEN_DIVE(divweo, divwe, 1);
1822 
1823 #if defined(TARGET_PPC64)
1824 static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
1825                                      TCGv arg2, int sign, int compute_ov)
1826 {
1827     TCGv_i64 t0 = tcg_temp_new_i64();
1828     TCGv_i64 t1 = tcg_temp_new_i64();
1829     TCGv_i64 t2 = tcg_temp_new_i64();
1830     TCGv_i64 t3 = tcg_temp_new_i64();
1831 
1832     tcg_gen_mov_i64(t0, arg1);
1833     tcg_gen_mov_i64(t1, arg2);
1834     if (sign) {
1835         tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
1836         tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
1837         tcg_gen_and_i64(t2, t2, t3);
1838         tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
1839         tcg_gen_or_i64(t2, t2, t3);
1840         tcg_gen_movi_i64(t3, 0);
1841         tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
1842         tcg_gen_div_i64(ret, t0, t1);
1843     } else {
1844         tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
1845         tcg_gen_movi_i64(t3, 0);
1846         tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
1847         tcg_gen_divu_i64(ret, t0, t1);
1848     }
1849     if (compute_ov) {
1850         tcg_gen_mov_tl(cpu_ov, t2);
1851         if (is_isa300(ctx)) {
1852             tcg_gen_mov_tl(cpu_ov32, t2);
1853         }
1854         tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
1855     }
1856 
1857     if (unlikely(Rc(ctx->opcode) != 0)) {
1858         gen_set_Rc0(ctx, ret);
1859     }
1860 }
1861 
1862 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
1863 static void glue(gen_, name)(DisasContext *ctx)                               \
1864 {                                                                             \
1865     gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
1866                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
1867                       sign, compute_ov);                                      \
1868 }
1869 /* divdu  divdu.  divduo  divduo.   */
1870 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
1871 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
1872 /* divd  divd.  divdo  divdo.   */
1873 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
1874 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
1875 
1876 GEN_DIVE(divdeu, divdeu, 0);
1877 GEN_DIVE(divdeuo, divdeu, 1);
1878 GEN_DIVE(divde, divde, 0);
1879 GEN_DIVE(divdeo, divde, 1);
1880 #endif
1881 
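/*
 * Word modulo (ISA v3.0 moduw/modsw).  As with divide, a zero divisor and
 * INT_MIN % -1 give an undefined result; the divisor is forced to a safe
 * value so the host operation cannot trap.  These instructions do not
 * affect CA or OV.
 */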
1882 static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
1883                                      TCGv arg2, int sign)
1884 {
1885     TCGv_i32 t0 = tcg_temp_new_i32();
1886     TCGv_i32 t1 = tcg_temp_new_i32();
1887 
1888     tcg_gen_trunc_tl_i32(t0, arg1);
1889     tcg_gen_trunc_tl_i32(t1, arg2);
1890     if (sign) {
1891         TCGv_i32 t2 = tcg_temp_new_i32();
1892         TCGv_i32 t3 = tcg_temp_new_i32();
1893         tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
1894         tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
1895         tcg_gen_and_i32(t2, t2, t3);
1896         tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
1897         tcg_gen_or_i32(t2, t2, t3);
1898         tcg_gen_movi_i32(t3, 0);
1899         tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
1900         tcg_gen_rem_i32(t3, t0, t1);
1901         tcg_gen_ext_i32_tl(ret, t3);
1902     } else {
1903         TCGv_i32 t2 = tcg_constant_i32(1);
1904         TCGv_i32 t3 = tcg_constant_i32(0);
1905         tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
1906         tcg_gen_remu_i32(t0, t0, t1);
1907         tcg_gen_extu_i32_tl(ret, t0);
1908     }
1909 }
1910 
1911 #define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
1912 static void glue(gen_, name)(DisasContext *ctx)                             \
1913 {                                                                           \
1914     gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1915                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1916                       sign);                                                \
1917 }
1918 
1919 GEN_INT_ARITH_MODW(moduw, 0x08, 0);
1920 GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1921 
1922 #if defined(TARGET_PPC64)
1923 static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
1924                                      TCGv arg2, int sign)
1925 {
1926     TCGv_i64 t0 = tcg_temp_new_i64();
1927     TCGv_i64 t1 = tcg_temp_new_i64();
1928 
1929     tcg_gen_mov_i64(t0, arg1);
1930     tcg_gen_mov_i64(t1, arg2);
1931     if (sign) {
1932         TCGv_i64 t2 = tcg_temp_new_i64();
1933         TCGv_i64 t3 = tcg_temp_new_i64();
1934         tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
1935         tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
1936         tcg_gen_and_i64(t2, t2, t3);
1937         tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
1938         tcg_gen_or_i64(t2, t2, t3);
1939         tcg_gen_movi_i64(t3, 0);
1940         tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
1941         tcg_gen_rem_i64(ret, t0, t1);
1942     } else {
1943         TCGv_i64 t2 = tcg_constant_i64(1);
1944         TCGv_i64 t3 = tcg_constant_i64(0);
1945         tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
1946         tcg_gen_remu_i64(ret, t0, t1);
1947     }
1948 }
1949 
1950 #define GEN_INT_ARITH_MODD(name, opc3, sign)                                \
1951 static void glue(gen_, name)(DisasContext *ctx)                             \
1952 {                                                                           \
1953     gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1954                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1955                       sign);                                                \
1956 }
1957 
1958 GEN_INT_ARITH_MODD(modud, 0x08, 0);
1959 GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1960 #endif
1961 
1962 /* mulhw  mulhw. */
1963 static void gen_mulhw(DisasContext *ctx)
1964 {
1965     TCGv_i32 t0 = tcg_temp_new_i32();
1966     TCGv_i32 t1 = tcg_temp_new_i32();
1967 
1968     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1969     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1970     tcg_gen_muls2_i32(t0, t1, t0, t1);
1971     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1972     if (unlikely(Rc(ctx->opcode) != 0)) {
1973         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1974     }
1975 }
1976 
1977 /* mulhwu  mulhwu.  */
1978 static void gen_mulhwu(DisasContext *ctx)
1979 {
1980     TCGv_i32 t0 = tcg_temp_new_i32();
1981     TCGv_i32 t1 = tcg_temp_new_i32();
1982 
1983     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1984     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1985     tcg_gen_mulu2_i32(t0, t1, t0, t1);
1986     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1987     if (unlikely(Rc(ctx->opcode) != 0)) {
1988         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1989     }
1990 }
1991 
1992 /* mullw  mullw. */
1993 static void gen_mullw(DisasContext *ctx)
1994 {
1995 #if defined(TARGET_PPC64)
1996     TCGv_i64 t0, t1;
1997     t0 = tcg_temp_new_i64();
1998     t1 = tcg_temp_new_i64();
1999     tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
2000     tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
2001     tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
2002 #else
2003     tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2004                     cpu_gpr[rB(ctx->opcode)]);
2005 #endif
2006     if (unlikely(Rc(ctx->opcode) != 0)) {
2007         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2008     }
2009 }
2010 
2011 /* mullwo  mullwo. */
2012 static void gen_mullwo(DisasContext *ctx)
2013 {
2014     TCGv_i32 t0 = tcg_temp_new_i32();
2015     TCGv_i32 t1 = tcg_temp_new_i32();
2016 
2017     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
2018     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
2019     tcg_gen_muls2_i32(t0, t1, t0, t1);
2020 #if defined(TARGET_PPC64)
2021     tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
2022 #else
2023     tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
2024 #endif
2025 
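    /*
     * OV is set iff the product does not fit in 32 bits, i.e. the high
     * half (t1) differs from the sign extension of the low half (t0).
     */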
2026     tcg_gen_sari_i32(t0, t0, 31);
2027     tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
2028     tcg_gen_extu_i32_tl(cpu_ov, t0);
2029     if (is_isa300(ctx)) {
2030         tcg_gen_mov_tl(cpu_ov32, cpu_ov);
2031     }
2032     tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
2033 
2034     if (unlikely(Rc(ctx->opcode) != 0)) {
2035         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2036     }
2037 }
2038 
2039 /* mulli */
2040 static void gen_mulli(DisasContext *ctx)
2041 {
2042     tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2043                     SIMM(ctx->opcode));
2044 }
2045 
2046 #if defined(TARGET_PPC64)
2047 /* mulhd  mulhd. */
2048 static void gen_mulhd(DisasContext *ctx)
2049 {
2050     TCGv lo = tcg_temp_new();
2051     tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2052                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2053     if (unlikely(Rc(ctx->opcode) != 0)) {
2054         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2055     }
2056 }
2057 
2058 /* mulhdu  mulhdu. */
2059 static void gen_mulhdu(DisasContext *ctx)
2060 {
2061     TCGv lo = tcg_temp_new();
2062     tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2063                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2064     if (unlikely(Rc(ctx->opcode) != 0)) {
2065         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2066     }
2067 }
2068 
2069 /* mulld  mulld. */
2070 static void gen_mulld(DisasContext *ctx)
2071 {
2072     tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2073                    cpu_gpr[rB(ctx->opcode)]);
2074     if (unlikely(Rc(ctx->opcode) != 0)) {
2075         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2076     }
2077 }
2078 
2079 /* mulldo  mulldo. */
2080 static void gen_mulldo(DisasContext *ctx)
2081 {
2082     TCGv_i64 t0 = tcg_temp_new_i64();
2083     TCGv_i64 t1 = tcg_temp_new_i64();
2084 
2085     tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
2086                       cpu_gpr[rB(ctx->opcode)]);
2087     tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);
2088 
2089     tcg_gen_sari_i64(t0, t0, 63);
2090     tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
2091     if (is_isa300(ctx)) {
2092         tcg_gen_mov_tl(cpu_ov32, cpu_ov);
2093     }
2094     tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
2095 
2096     if (unlikely(Rc(ctx->opcode) != 0)) {
2097         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2098     }
2099 }
2100 #endif
2101 
2102 /* Common subf function */
2103 static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
2104                                      TCGv arg2, bool add_ca, bool compute_ca,
2105                                      bool compute_ov, bool compute_rc0)
2106 {
2107     TCGv t0 = ret;
2108 
2109     if (compute_ca || compute_ov) {
2110         t0 = tcg_temp_new();
2111     }
2112 
2113     if (compute_ca) {
2114         /* dest = ~arg1 + arg2 [+ ca].  */
2115         if (NARROW_MODE(ctx)) {
2116             /*
2117              * Caution: a non-obvious corner case of the spec is that
2118              * we must produce the *entire* 64-bit addition, but
2119              * produce the carry into bit 32.
2120              */
2121             TCGv inv1 = tcg_temp_new();
2122             TCGv t1 = tcg_temp_new();
2123             tcg_gen_not_tl(inv1, arg1);
2124             if (add_ca) {
2125                 tcg_gen_add_tl(t0, arg2, cpu_ca);
2126             } else {
2127                 tcg_gen_addi_tl(t0, arg2, 1);
2128             }
2129             tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
2130             tcg_gen_add_tl(t0, t0, inv1);
2131             tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changed w/ carry */
2132             tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
2133             if (is_isa300(ctx)) {
2134                 tcg_gen_mov_tl(cpu_ca32, cpu_ca);
2135             }
2136         } else if (add_ca) {
2137             TCGv zero, inv1 = tcg_temp_new();
2138             tcg_gen_not_tl(inv1, arg1);
2139             zero = tcg_constant_tl(0);
2140             tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
2141             tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
2142             gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
2143         } else {
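            /* CA is the absence of a borrow: set iff arg2 >= arg1 unsigned. */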
2144             tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
2145             tcg_gen_sub_tl(t0, arg2, arg1);
2146             gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
2147         }
2148     } else if (add_ca) {
2149         /*
2150          * Since we're ignoring carry-out, we can simplify the
2151          * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
2152          */
2153         tcg_gen_sub_tl(t0, arg2, arg1);
2154         tcg_gen_add_tl(t0, t0, cpu_ca);
2155         tcg_gen_subi_tl(t0, t0, 1);
2156     } else {
2157         tcg_gen_sub_tl(t0, arg2, arg1);
2158     }
2159 
2160     if (compute_ov) {
2161         gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
2162     }
2163     if (unlikely(compute_rc0)) {
2164         gen_set_Rc0(ctx, t0);
2165     }
2166 
2167     if (t0 != ret) {
2168         tcg_gen_mov_tl(ret, t0);
2169     }
2170 }
2171 /* Sub functions with two operands */
2172 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
2173 static void glue(gen_, name)(DisasContext *ctx)                               \
2174 {                                                                             \
2175     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
2176                       cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
2177                       add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
2178 }
2179 /* Sub functions with one operand and one immediate */
2180 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
2181                                 add_ca, compute_ca, compute_ov)               \
2182 static void glue(gen_, name)(DisasContext *ctx)                               \
2183 {                                                                             \
2184     TCGv t0 = tcg_constant_tl(const_val);                                     \
2185     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
2186                       cpu_gpr[rA(ctx->opcode)], t0,                           \
2187                       add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
2188 }
2189 /* subf  subf.  subfo  subfo. */
2190 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
2191 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
2192 /* subfc  subfc.  subfco  subfco. */
2193 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
2194 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
2195 /* subfe  subfe.  subfeo  subfeo. */
2196 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
2197 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
2198 /* subfme  subfme.  subfmeo  subfmeo.  */
2199 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
2200 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
2201 /* subfze  subfze.  subfzeo  subfzeo.*/
2202 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
2203 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2204 
2205 /* subfic */
2206 static void gen_subfic(DisasContext *ctx)
2207 {
2208     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
2209     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2210                       c, 0, 1, 0, 0);
2211 }
2212 
2213 /* neg neg. nego nego. */
2214 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2215 {
2216     TCGv zero = tcg_constant_tl(0);
2217     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2218                       zero, 0, 0, compute_ov, Rc(ctx->opcode));
2219 }
2220 
2221 static void gen_neg(DisasContext *ctx)
2222 {
2223     tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2224     if (unlikely(Rc(ctx->opcode))) {
2225         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2226     }
2227 }
2228 
2229 static void gen_nego(DisasContext *ctx)
2230 {
2231     gen_op_arith_neg(ctx, 1);
2232 }
2233 
2234 /***                            Integer logical                            ***/
2235 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
2236 static void glue(gen_, name)(DisasContext *ctx)                               \
2237 {                                                                             \
2238     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
2239        cpu_gpr[rB(ctx->opcode)]);                                             \
2240     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2241         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2242 }
2243 
2244 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
2245 static void glue(gen_, name)(DisasContext *ctx)                               \
2246 {                                                                             \
2247     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
2248     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2249         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2250 }
2251 
2252 /* and & and. */
2253 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
2254 /* andc & andc. */
2255 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2256 
2257 /* andi. */
2258 static void gen_andi_(DisasContext *ctx)
2259 {
2260     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2261                     UIMM(ctx->opcode));
2262     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2263 }
2264 
2265 /* andis. */
2266 static void gen_andis_(DisasContext *ctx)
2267 {
2268     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2269                     UIMM(ctx->opcode) << 16);
2270     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2271 }
2272 
2273 /* cntlzw */
2274 static void gen_cntlzw(DisasContext *ctx)
2275 {
2276     TCGv_i32 t = tcg_temp_new_i32();
2277 
2278     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2279     tcg_gen_clzi_i32(t, t, 32);
2280     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2281 
2282     if (unlikely(Rc(ctx->opcode) != 0)) {
2283         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2284     }
2285 }
2286 
2287 /* cnttzw */
2288 static void gen_cnttzw(DisasContext *ctx)
2289 {
2290     TCGv_i32 t = tcg_temp_new_i32();
2291 
2292     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2293     tcg_gen_ctzi_i32(t, t, 32);
2294     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2295 
2296     if (unlikely(Rc(ctx->opcode) != 0)) {
2297         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2298     }
2299 }
2300 
2301 /* eqv & eqv. */
2302 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
2303 /* extsb & extsb. */
2304 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
2305 /* extsh & extsh. */
2306 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
2307 /* nand & nand. */
2308 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
2309 /* nor & nor. */
2310 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2311 
2312 #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
2313 static void gen_pause(DisasContext *ctx)
2314 {
2315     TCGv_i32 t0 = tcg_constant_i32(0);
2316     tcg_gen_st_i32(t0, cpu_env,
2317                    -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
2318 
2319     /* Stop translation; this gives other CPUs a chance to run */
2320     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
2321 }
2322 #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2323 
2324 /* or & or. */
2325 static void gen_or(DisasContext *ctx)
2326 {
2327     int rs, ra, rb;
2328 
2329     rs = rS(ctx->opcode);
2330     ra = rA(ctx->opcode);
2331     rb = rB(ctx->opcode);
2332     /* Optimisation for the mr case */
2333     if (rs != ra || rs != rb) {
2334         if (rs != rb) {
2335             tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
2336         } else {
2337             tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
2338         }
2339         if (unlikely(Rc(ctx->opcode) != 0)) {
2340             gen_set_Rc0(ctx, cpu_gpr[ra]);
2341         }
2342     } else if (unlikely(Rc(ctx->opcode) != 0)) {
2343         gen_set_Rc0(ctx, cpu_gpr[rs]);
2344 #if defined(TARGET_PPC64)
2345     } else if (rs != 0) { /* 0 is nop */
2346         int prio = 0;
2347 
2348         switch (rs) {
2349         case 1:
2350             /* Set process priority to low */
2351             prio = 2;
2352             break;
2353         case 6:
2354             /* Set process priority to medium-low */
2355             prio = 3;
2356             break;
2357         case 2:
2358             /* Set process priority to normal */
2359             prio = 4;
2360             break;
2361 #if !defined(CONFIG_USER_ONLY)
2362         case 31:
2363             if (!ctx->pr) {
2364                 /* Set process priority to very low */
2365                 prio = 1;
2366             }
2367             break;
2368         case 5:
2369             if (!ctx->pr) {
2370                 /* Set process priority to medium-high */
2371                 prio = 5;
2372             }
2373             break;
2374         case 3:
2375             if (!ctx->pr) {
2376                 /* Set process priority to high */
2377                 prio = 6;
2378             }
2379             break;
2380         case 7:
2381             if (ctx->hv && !ctx->pr) {
2382                 /* Set process priority to very high */
2383                 prio = 7;
2384             }
2385             break;
2386 #endif
2387         default:
2388             break;
2389         }
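        /*
         * PPR[PRI] is the three-bit field at bits 11:13 (bits 52:50 counting
         * from the least-significant bit): clear it and insert the selected
         * priority.
         */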
2390         if (prio) {
2391             TCGv t0 = tcg_temp_new();
2392             gen_load_spr(t0, SPR_PPR);
2393             tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
2394             tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
2395             gen_store_spr(SPR_PPR, t0);
2396         }
2397 #if !defined(CONFIG_USER_ONLY)
2398         /*
2399          * Pause out of TCG; otherwise spin loops with smt_low eat too
2400          * much CPU and the kernel hangs.  This applies to all
2401          * encodings other than no-op, e.g., miso(rs=26), yield(27),
2402          * mdoio(29), mdoom(30), and all currently undefined.
2403          */
2404         gen_pause(ctx);
2405 #endif
2406 #endif
2407     }
2408 }
2409 /* orc & orc. */
2410 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2411 
2412 /* xor & xor. */
2413 static void gen_xor(DisasContext *ctx)
2414 {
2415     /* Optimisation for "set to zero" case */
2416     if (rS(ctx->opcode) != rB(ctx->opcode)) {
2417         tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2418                        cpu_gpr[rB(ctx->opcode)]);
2419     } else {
2420         tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2421     }
2422     if (unlikely(Rc(ctx->opcode) != 0)) {
2423         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2424     }
2425 }
2426 
2427 /* ori */
2428 static void gen_ori(DisasContext *ctx)
2429 {
2430     target_ulong uimm = UIMM(ctx->opcode);
2431 
2432     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2433         return;
2434     }
2435     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2436 }
2437 
2438 /* oris */
2439 static void gen_oris(DisasContext *ctx)
2440 {
2441     target_ulong uimm = UIMM(ctx->opcode);
2442 
2443     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2444         /* NOP */
2445         return;
2446     }
2447     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2448                    uimm << 16);
2449 }
2450 
2451 /* xori */
2452 static void gen_xori(DisasContext *ctx)
2453 {
2454     target_ulong uimm = UIMM(ctx->opcode);
2455 
2456     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2457         /* NOP */
2458         return;
2459     }
2460     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2461 }
2462 
2463 /* xoris */
2464 static void gen_xoris(DisasContext *ctx)
2465 {
2466     target_ulong uimm = UIMM(ctx->opcode);
2467 
2468     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2469         /* NOP */
2470         return;
2471     }
2472     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2473                     uimm << 16);
2474 }
2475 
2476 /* popcntb : PowerPC 2.03 specification */
2477 static void gen_popcntb(DisasContext *ctx)
2478 {
2479     gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
2480 }
2481 
2482 static void gen_popcntw(DisasContext *ctx)
2483 {
2484 #if defined(TARGET_PPC64)
2485     gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
2486 #else
2487     tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
2488 #endif
2489 }
2490 
2491 #if defined(TARGET_PPC64)
2492 /* popcntd: PowerPC 2.06 specification */
2493 static void gen_popcntd(DisasContext *ctx)
2494 {
2495     tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
2496 }
2497 #endif
2498 
2499 /* prtyw: PowerPC 2.05 specification */
2500 static void gen_prtyw(DisasContext *ctx)
2501 {
2502     TCGv ra = cpu_gpr[rA(ctx->opcode)];
2503     TCGv rs = cpu_gpr[rS(ctx->opcode)];
2504     TCGv t0 = tcg_temp_new();
2505     tcg_gen_shri_tl(t0, rs, 16);
2506     tcg_gen_xor_tl(ra, rs, t0);
2507     tcg_gen_shri_tl(t0, ra, 8);
2508     tcg_gen_xor_tl(ra, ra, t0);
2509     tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
2510 }
2511 
2512 #if defined(TARGET_PPC64)
2513 /* prtyd: PowerPC 2.05 specification */
2514 static void gen_prtyd(DisasContext *ctx)
2515 {
2516     TCGv ra = cpu_gpr[rA(ctx->opcode)];
2517     TCGv rs = cpu_gpr[rS(ctx->opcode)];
2518     TCGv t0 = tcg_temp_new();
2519     tcg_gen_shri_tl(t0, rs, 32);
2520     tcg_gen_xor_tl(ra, rs, t0);
2521     tcg_gen_shri_tl(t0, ra, 16);
2522     tcg_gen_xor_tl(ra, ra, t0);
2523     tcg_gen_shri_tl(t0, ra, 8);
2524     tcg_gen_xor_tl(ra, ra, t0);
2525     tcg_gen_andi_tl(ra, ra, 1);
2526 }
2527 #endif
2528 
2529 #if defined(TARGET_PPC64)
2530 /* bpermd */
2531 static void gen_bpermd(DisasContext *ctx)
2532 {
2533     gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
2534                       cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2535 }
2536 #endif
2537 
2538 #if defined(TARGET_PPC64)
2539 /* extsw & extsw. */
2540 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2541 
2542 /* cntlzd */
2543 static void gen_cntlzd(DisasContext *ctx)
2544 {
2545     tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2546     if (unlikely(Rc(ctx->opcode) != 0)) {
2547         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2548     }
2549 }
2550 
2551 /* cnttzd */
2552 static void gen_cnttzd(DisasContext *ctx)
2553 {
2554     tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2555     if (unlikely(Rc(ctx->opcode) != 0)) {
2556         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2557     }
2558 }
2559 
2560 /* darn */
2561 static void gen_darn(DisasContext *ctx)
2562 {
2563     int l = L(ctx->opcode);
2564 
2565     if (l > 2) {
2566         tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
2567     } else {
2568         translator_io_start(&ctx->base);
2569         if (l == 0) {
2570             gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
2571         } else {
2572             /* Return 64-bit random for both CRN and RRN */
2573             gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
2574         }
2575     }
2576 }
2577 #endif
2578 
2579 /***                             Integer rotate                            ***/
2580 
2581 /* rlwimi & rlwimi. */
2582 static void gen_rlwimi(DisasContext *ctx)
2583 {
2584     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2585     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2586     uint32_t sh = SH(ctx->opcode);
2587     uint32_t mb = MB(ctx->opcode);
2588     uint32_t me = ME(ctx->opcode);
2589 
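    /*
     * When the mask mb..me is contiguous and the rotated field lands
     * exactly under it (sh == 31 - me), rlwimi is a plain bit-field insert
     * of width me - mb + 1 at offset sh; otherwise rotate, mask and merge.
     */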
2590     if (sh == (31 - me) && mb <= me) {
2591         tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
2592     } else {
2593         target_ulong mask;
2594         bool mask_in_32b = true;
2595         TCGv t1;
2596 
2597 #if defined(TARGET_PPC64)
2598         mb += 32;
2599         me += 32;
2600 #endif
2601         mask = MASK(mb, me);
2602 
2603 #if defined(TARGET_PPC64)
2604         if (mask > 0xffffffffu) {
2605             mask_in_32b = false;
2606         }
2607 #endif
2608         t1 = tcg_temp_new();
2609         if (mask_in_32b) {
2610             TCGv_i32 t0 = tcg_temp_new_i32();
2611             tcg_gen_trunc_tl_i32(t0, t_rs);
2612             tcg_gen_rotli_i32(t0, t0, sh);
2613             tcg_gen_extu_i32_tl(t1, t0);
2614         } else {
2615 #if defined(TARGET_PPC64)
2616             tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
2617             tcg_gen_rotli_i64(t1, t1, sh);
2618 #else
2619             g_assert_not_reached();
2620 #endif
2621         }
2622 
2623         tcg_gen_andi_tl(t1, t1, mask);
2624         tcg_gen_andi_tl(t_ra, t_ra, ~mask);
2625         tcg_gen_or_tl(t_ra, t_ra, t1);
2626     }
2627     if (unlikely(Rc(ctx->opcode) != 0)) {
2628         gen_set_Rc0(ctx, t_ra);
2629     }
2630 }
2631 
2632 /* rlwinm & rlwinm. */
2633 static void gen_rlwinm(DisasContext *ctx)
2634 {
2635     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2636     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2637     int sh = SH(ctx->opcode);
2638     int mb = MB(ctx->opcode);
2639     int me = ME(ctx->opcode);
2640     int len = me - mb + 1;
2641     int rsh = (32 - sh) & 31;
2642 
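    /*
     * Fast paths: a left shift plus mask (me == 31 - sh) becomes a deposit
     * into zero, and a mask ending at bit 31 (the LSB) becomes an unsigned
     * bit-field extract; everything else is rotate then mask.
     */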
2643     if (sh != 0 && len > 0 && me == (31 - sh)) {
2644         tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
2645     } else if (me == 31 && rsh + len <= 32) {
2646         tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
2647     } else {
2648         target_ulong mask;
2649         bool mask_in_32b = true;
2650 #if defined(TARGET_PPC64)
2651         mb += 32;
2652         me += 32;
2653 #endif
2654         mask = MASK(mb, me);
2655 #if defined(TARGET_PPC64)
2656         if (mask > 0xffffffffu) {
2657             mask_in_32b = false;
2658         }
2659 #endif
2660         if (mask_in_32b) {
2661             if (sh == 0) {
2662                 tcg_gen_andi_tl(t_ra, t_rs, mask);
2663             } else {
2664                 TCGv_i32 t0 = tcg_temp_new_i32();
2665                 tcg_gen_trunc_tl_i32(t0, t_rs);
2666                 tcg_gen_rotli_i32(t0, t0, sh);
2667                 tcg_gen_andi_i32(t0, t0, mask);
2668                 tcg_gen_extu_i32_tl(t_ra, t0);
2669             }
2670         } else {
2671 #if defined(TARGET_PPC64)
2672             tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
2673             tcg_gen_rotli_i64(t_ra, t_ra, sh);
2674             tcg_gen_andi_i64(t_ra, t_ra, mask);
2675 #else
2676             g_assert_not_reached();
2677 #endif
2678         }
2679     }
2680     if (unlikely(Rc(ctx->opcode) != 0)) {
2681         gen_set_Rc0(ctx, t_ra);
2682     }
2683 }
2684 
2685 /* rlwnm & rlwnm. */
2686 static void gen_rlwnm(DisasContext *ctx)
2687 {
2688     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2689     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2690     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2691     uint32_t mb = MB(ctx->opcode);
2692     uint32_t me = ME(ctx->opcode);
2693     target_ulong mask;
2694     bool mask_in_32b = true;
2695 
2696 #if defined(TARGET_PPC64)
2697     mb += 32;
2698     me += 32;
2699 #endif
2700     mask = MASK(mb, me);
2701 
2702 #if defined(TARGET_PPC64)
2703     if (mask > 0xffffffffu) {
2704         mask_in_32b = false;
2705     }
2706 #endif
2707     if (mask_in_32b) {
2708         TCGv_i32 t0 = tcg_temp_new_i32();
2709         TCGv_i32 t1 = tcg_temp_new_i32();
2710         tcg_gen_trunc_tl_i32(t0, t_rb);
2711         tcg_gen_trunc_tl_i32(t1, t_rs);
2712         tcg_gen_andi_i32(t0, t0, 0x1f);
2713         tcg_gen_rotl_i32(t1, t1, t0);
2714         tcg_gen_extu_i32_tl(t_ra, t1);
2715     } else {
2716 #if defined(TARGET_PPC64)
2717         TCGv_i64 t0 = tcg_temp_new_i64();
2718         tcg_gen_andi_i64(t0, t_rb, 0x1f);
2719         tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
2720         tcg_gen_rotl_i64(t_ra, t_ra, t0);
2721 #else
2722         g_assert_not_reached();
2723 #endif
2724     }
2725 
2726     tcg_gen_andi_tl(t_ra, t_ra, mask);
2727 
2728     if (unlikely(Rc(ctx->opcode) != 0)) {
2729         gen_set_Rc0(ctx, t_ra);
2730     }
2731 }
2732 
2733 #if defined(TARGET_PPC64)
2734 #define GEN_PPC64_R2(name, opc1, opc2)                                        \
2735 static void glue(gen_, name##0)(DisasContext *ctx)                            \
2736 {                                                                             \
2737     gen_##name(ctx, 0);                                                       \
2738 }                                                                             \
2739                                                                               \
2740 static void glue(gen_, name##1)(DisasContext *ctx)                            \
2741 {                                                                             \
2742     gen_##name(ctx, 1);                                                       \
2743 }
2744 #define GEN_PPC64_R4(name, opc1, opc2)                                        \
2745 static void glue(gen_, name##0)(DisasContext *ctx)                            \
2746 {                                                                             \
2747     gen_##name(ctx, 0, 0);                                                    \
2748 }                                                                             \
2749                                                                               \
2750 static void glue(gen_, name##1)(DisasContext *ctx)                            \
2751 {                                                                             \
2752     gen_##name(ctx, 0, 1);                                                    \
2753 }                                                                             \
2754                                                                               \
2755 static void glue(gen_, name##2)(DisasContext *ctx)                            \
2756 {                                                                             \
2757     gen_##name(ctx, 1, 0);                                                    \
2758 }                                                                             \
2759                                                                               \
2760 static void glue(gen_, name##3)(DisasContext *ctx)                            \
2761 {                                                                             \
2762     gen_##name(ctx, 1, 1);                                                    \
2763 }
2764 
2765 static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
2766 {
2767     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2768     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2769     int len = me - mb + 1;
2770     int rsh = (64 - sh) & 63;
2771 
2772     if (sh != 0 && len > 0 && me == (63 - sh)) {
2773         tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
2774     } else if (me == 63 && rsh + len <= 64) {
2775         tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
2776     } else {
2777         tcg_gen_rotli_tl(t_ra, t_rs, sh);
2778         tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2779     }
2780     if (unlikely(Rc(ctx->opcode) != 0)) {
2781         gen_set_Rc0(ctx, t_ra);
2782     }
2783 }
2784 
2785 /* rldicl - rldicl. */
2786 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2787 {
2788     uint32_t sh, mb;
2789 
2790     sh = SH(ctx->opcode) | (shn << 5);
2791     mb = MB(ctx->opcode) | (mbn << 5);
2792     gen_rldinm(ctx, mb, 63, sh);
2793 }
2794 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2795 
2796 /* rldicr - rldicr. */
2797 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2798 {
2799     uint32_t sh, me;
2800 
2801     sh = SH(ctx->opcode) | (shn << 5);
2802     me = MB(ctx->opcode) | (men << 5);
2803     gen_rldinm(ctx, 0, me, sh);
2804 }
2805 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2806 
2807 /* rldic - rldic. */
2808 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2809 {
2810     uint32_t sh, mb;
2811 
2812     sh = SH(ctx->opcode) | (shn << 5);
2813     mb = MB(ctx->opcode) | (mbn << 5);
2814     gen_rldinm(ctx, mb, 63 - sh, sh);
2815 }
2816 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2817 
2818 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2819 {
2820     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2821     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2822     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2823     TCGv t0;
2824 
2825     t0 = tcg_temp_new();
2826     tcg_gen_andi_tl(t0, t_rb, 0x3f);
2827     tcg_gen_rotl_tl(t_ra, t_rs, t0);
2828 
2829     tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2830     if (unlikely(Rc(ctx->opcode) != 0)) {
2831         gen_set_Rc0(ctx, t_ra);
2832     }
2833 }
2834 
2835 /* rldcl - rldcl. */
2836 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2837 {
2838     uint32_t mb;
2839 
2840     mb = MB(ctx->opcode) | (mbn << 5);
2841     gen_rldnm(ctx, mb, 63);
2842 }
2843 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2844 
2845 /* rldcr - rldcr. */
2846 static inline void gen_rldcr(DisasContext *ctx, int men)
2847 {
2848     uint32_t me;
2849 
2850     me = MB(ctx->opcode) | (men << 5);
2851     gen_rldnm(ctx, 0, me);
2852 }
2853 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2854 
2855 /* rldimi - rldimi. */
2856 static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
2857 {
2858     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2859     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2860     uint32_t sh = SH(ctx->opcode) | (shn << 5);
2861     uint32_t mb = MB(ctx->opcode) | (mbn << 5);
2862     uint32_t me = 63 - sh;
2863 
2864     if (mb <= me) {
2865         tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
2866     } else {
2867         target_ulong mask = MASK(mb, me);
2868         TCGv t1 = tcg_temp_new();
2869 
2870         tcg_gen_rotli_tl(t1, t_rs, sh);
2871         tcg_gen_andi_tl(t1, t1, mask);
2872         tcg_gen_andi_tl(t_ra, t_ra, ~mask);
2873         tcg_gen_or_tl(t_ra, t_ra, t1);
2874     }
2875     if (unlikely(Rc(ctx->opcode) != 0)) {
2876         gen_set_Rc0(ctx, t_ra);
2877     }
2878 }
2879 GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2880 #endif
2881 
2882 /***                             Integer shift                             ***/
2883 
2884 /* slw & slw. */
2885 static void gen_slw(DisasContext *ctx)
2886 {
2887     TCGv t0, t1;
2888 
2889     t0 = tcg_temp_new();
2890     /* AND rS with a mask that is 0 when rB >= 0x20 */
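    /*
     * The mask comes from bit 5 of rB: shifting it up to the sign bit and
     * arithmetically shifting back replicates it, so the andc zeroes the
     * source whenever the shift count is 32 or more.
     */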
2891 #if defined(TARGET_PPC64)
2892     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2893     tcg_gen_sari_tl(t0, t0, 0x3f);
2894 #else
2895     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2896     tcg_gen_sari_tl(t0, t0, 0x1f);
2897 #endif
2898     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2899     t1 = tcg_temp_new();
2900     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2901     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2902     tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2903     if (unlikely(Rc(ctx->opcode) != 0)) {
2904         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2905     }
2906 }
2907 
2908 /* sraw & sraw. */
2909 static void gen_sraw(DisasContext *ctx)
2910 {
2911     gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2912                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2913     if (unlikely(Rc(ctx->opcode) != 0)) {
2914         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2915     }
2916 }
2917 
2918 /* srawi & srawi. */
2919 static void gen_srawi(DisasContext *ctx)
2920 {
2921     int sh = SH(ctx->opcode);
2922     TCGv dst = cpu_gpr[rA(ctx->opcode)];
2923     TCGv src = cpu_gpr[rS(ctx->opcode)];
2924     if (sh == 0) {
2925         tcg_gen_ext32s_tl(dst, src);
2926         tcg_gen_movi_tl(cpu_ca, 0);
2927         if (is_isa300(ctx)) {
2928             tcg_gen_movi_tl(cpu_ca32, 0);
2929         }
2930     } else {
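        /*
         * CA is set iff the source word is negative and at least one 1 bit
         * is shifted out of it.
         */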
2931         TCGv t0;
2932         tcg_gen_ext32s_tl(dst, src);
2933         tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
2934         t0 = tcg_temp_new();
2935         tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
2936         tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
2937         tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
2938         if (is_isa300(ctx)) {
2939             tcg_gen_mov_tl(cpu_ca32, cpu_ca);
2940         }
2941         tcg_gen_sari_tl(dst, dst, sh);
2942     }
2943     if (unlikely(Rc(ctx->opcode) != 0)) {
2944         gen_set_Rc0(ctx, dst);
2945     }
2946 }
2947 
2948 /* srw & srw. */
2949 static void gen_srw(DisasContext *ctx)
2950 {
2951     TCGv t0, t1;
2952 
2953     t0 = tcg_temp_new();
2954     /* AND rS with a mask that is 0 when rB >= 0x20 */
2955 #if defined(TARGET_PPC64)
2956     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
2957     tcg_gen_sari_tl(t0, t0, 0x3f);
2958 #else
2959     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
2960     tcg_gen_sari_tl(t0, t0, 0x1f);
2961 #endif
2962     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2963     tcg_gen_ext32u_tl(t0, t0);
2964     t1 = tcg_temp_new();
2965     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
2966     tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2967     if (unlikely(Rc(ctx->opcode) != 0)) {
2968         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2969     }
2970 }
2971 
2972 #if defined(TARGET_PPC64)
2973 /* sld & sld. */
2974 static void gen_sld(DisasContext *ctx)
2975 {
2976     TCGv t0, t1;
2977 
2978     t0 = tcg_temp_new();
2979     /* AND rS with a mask that is 0 when rB >= 0x40 */
2980     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2981     tcg_gen_sari_tl(t0, t0, 0x3f);
2982     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2983     t1 = tcg_temp_new();
2984     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2985     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2986     if (unlikely(Rc(ctx->opcode) != 0)) {
2987         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2988     }
2989 }
2990 
2991 /* srad & srad. */
2992 static void gen_srad(DisasContext *ctx)
2993 {
2994     gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
2995                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2996     if (unlikely(Rc(ctx->opcode) != 0)) {
2997         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2998     }
2999 }
3000 /* sradi & sradi. */
3001 static inline void gen_sradi(DisasContext *ctx, int n)
3002 {
3003     int sh = SH(ctx->opcode) + (n << 5);
3004     TCGv dst = cpu_gpr[rA(ctx->opcode)];
3005     TCGv src = cpu_gpr[rS(ctx->opcode)];
3006     if (sh == 0) {
3007         tcg_gen_mov_tl(dst, src);
3008         tcg_gen_movi_tl(cpu_ca, 0);
3009         if (is_isa300(ctx)) {
3010             tcg_gen_movi_tl(cpu_ca32, 0);
3011         }
3012     } else {
3013         TCGv t0;
3014         tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
3015         t0 = tcg_temp_new();
3016         tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
3017         tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
3018         tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
3019         if (is_isa300(ctx)) {
3020             tcg_gen_mov_tl(cpu_ca32, cpu_ca);
3021         }
3022         tcg_gen_sari_tl(dst, src, sh);
3023     }
3024     if (unlikely(Rc(ctx->opcode) != 0)) {
3025         gen_set_Rc0(ctx, dst);
3026     }
3027 }
3028 
3029 static void gen_sradi0(DisasContext *ctx)
3030 {
3031     gen_sradi(ctx, 0);
3032 }
3033 
3034 static void gen_sradi1(DisasContext *ctx)
3035 {
3036     gen_sradi(ctx, 1);
3037 }
3038 
3039 /* extswsli & extswsli. */
3040 static inline void gen_extswsli(DisasContext *ctx, int n)
3041 {
3042     int sh = SH(ctx->opcode) + (n << 5);
3043     TCGv dst = cpu_gpr[rA(ctx->opcode)];
3044     TCGv src = cpu_gpr[rS(ctx->opcode)];
3045 
3046     tcg_gen_ext32s_tl(dst, src);
3047     tcg_gen_shli_tl(dst, dst, sh);
3048     if (unlikely(Rc(ctx->opcode) != 0)) {
3049         gen_set_Rc0(ctx, dst);
3050     }
3051 }
3052 
3053 static void gen_extswsli0(DisasContext *ctx)
3054 {
3055     gen_extswsli(ctx, 0);
3056 }
3057 
3058 static void gen_extswsli1(DisasContext *ctx)
3059 {
3060     gen_extswsli(ctx, 1);
3061 }
3062 
3063 /* srd & srd. */
3064 static void gen_srd(DisasContext *ctx)
3065 {
3066     TCGv t0, t1;
3067 
3068     t0 = tcg_temp_new();
3069     /* AND rS with a mask that is 0 when rB >= 0x40 */
3070     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
3071     tcg_gen_sari_tl(t0, t0, 0x3f);
3072     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
3073     t1 = tcg_temp_new();
3074     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
3075     tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
3076     if (unlikely(Rc(ctx->opcode) != 0)) {
3077         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3078     }
3079 }
3080 #endif
3081 
3082 /***                           Addressing modes                            ***/
3083 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
3084 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
3085                                       target_long maskl)
3086 {
3087     target_long simm = SIMM(ctx->opcode);
3088 
3089     simm &= ~maskl;
3090     if (rA(ctx->opcode) == 0) {
3091         if (NARROW_MODE(ctx)) {
3092             simm = (uint32_t)simm;
3093         }
3094         tcg_gen_movi_tl(EA, simm);
3095     } else if (likely(simm != 0)) {
3096         tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
3097         if (NARROW_MODE(ctx)) {
3098             tcg_gen_ext32u_tl(EA, EA);
3099         }
3100     } else {
3101         if (NARROW_MODE(ctx)) {
3102             tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3103         } else {
3104             tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3105         }
3106     }
3107 }
3108 
3109 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3110 {
3111     if (rA(ctx->opcode) == 0) {
3112         if (NARROW_MODE(ctx)) {
3113             tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3114         } else {
3115             tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3116         }
3117     } else {
3118         tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3119         if (NARROW_MODE(ctx)) {
3120             tcg_gen_ext32u_tl(EA, EA);
3121         }
3122     }
3123 }
3124 
3125 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3126 {
3127     if (rA(ctx->opcode) == 0) {
3128         tcg_gen_movi_tl(EA, 0);
3129     } else if (NARROW_MODE(ctx)) {
3130         tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3131     } else {
3132         tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3133     }
3134 }
3135 
3136 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
3137                                 target_long val)
3138 {
3139     tcg_gen_addi_tl(ret, arg1, val);
3140     if (NARROW_MODE(ctx)) {
3141         tcg_gen_ext32u_tl(ret, ret);
3142     }
3143 }
3144 
3145 static inline void gen_align_no_le(DisasContext *ctx)
3146 {
3147     gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
3148                       (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
3149 }
3150 
3151 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3152 {
3153     TCGv ea = tcg_temp_new();
3154     if (ra) {
3155         tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3156     } else {
3157         tcg_gen_mov_tl(ea, displ);
3158     }
3159     if (NARROW_MODE(ctx)) {
3160         tcg_gen_ext32u_tl(ea, ea);
3161     }
3162     return ea;
3163 }
3164 
3165 /***                             Integer load                              ***/
3166 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
3167 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
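/*
 * DEF_MEMOP applies the guest's current byte order; BSWAP_MEMOP flips it,
 * which is what the byte-reversed load/store forms (e.g. lwbrx/stwbrx) need.
 */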
3168 
3169 #define GEN_QEMU_LOAD_TL(ldop, op)                                      \
3170 static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
3171                                   TCGv val,                             \
3172                                   TCGv addr)                            \
3173 {                                                                       \
3174     tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
3175 }
3176 
3177 GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
3178 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
3179 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
3180 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
3181 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
3182 
3183 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
3184 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
3185 
3186 #define GEN_QEMU_LOAD_64(ldop, op)                                  \
3187 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
3188                                              TCGv_i64 val,          \
3189                                              TCGv addr)             \
3190 {                                                                   \
3191     tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
3192 }
3193 
3194 GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
3195 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
3196 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
3197 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
3198 GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))
3199 
3200 #if defined(TARGET_PPC64)
3201 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
3202 #endif
3203 
3204 #define GEN_QEMU_STORE_TL(stop, op)                                     \
3205 static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
3206                                   TCGv val,                             \
3207                                   TCGv addr)                            \
3208 {                                                                       \
3209     tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
3210 }
3211 
3212 #if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
3213 GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
3214 #endif
3215 GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
3216 GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
3217 
3218 GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
3219 GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
3220 
3221 #define GEN_QEMU_STORE_64(stop, op)                               \
3222 static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
3223                                               TCGv_i64 val,       \
3224                                               TCGv addr)          \
3225 {                                                                 \
3226     tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
3227 }
3228 
3229 GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
3230 GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
3231 GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
3232 GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))
3233 
3234 #if defined(TARGET_PPC64)
3235 GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
3236 #endif
3237 
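/*
 * GEN_LDX_E emits an indexed load: EA = (rA|0) + rB, result placed in rD.
 * The 'chk' argument inserts the privilege check the instruction requires.
 */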
3238 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
3239 static void glue(gen_, name##x)(DisasContext *ctx)                            \
3240 {                                                                             \
3241     TCGv EA;                                                                  \
3242     chk(ctx);                                                                 \
3243     gen_set_access_type(ctx, ACCESS_INT);                                     \
3244     EA = tcg_temp_new();                                                      \
3245     gen_addr_reg_index(ctx, EA);                                              \
3246     gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
3247 }
3248 
3249 #define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
3250     GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)
3251 
3252 #define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
3253     GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
3254 
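/*
 * External process ID loads are privileged (CHK_SV) and use the dedicated
 * PPC_TLB_EPID_LOAD MMU index instead of the current ctx->mem_idx.
 */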
3255 #define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
3256 static void glue(gen_, name##epx)(DisasContext *ctx)                          \
3257 {                                                                             \
3258     TCGv EA;                                                                  \
3259     CHK_SV(ctx);                                                              \
3260     gen_set_access_type(ctx, ACCESS_INT);                                     \
3261     EA = tcg_temp_new();                                                      \
3262     gen_addr_reg_index(ctx, EA);                                              \
3263     tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
3264 }
3265 
3266 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
3267 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
3268 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
3269 #if defined(TARGET_PPC64)
3270 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
3271 #endif
3272 
3273 #if defined(TARGET_PPC64)
3274 /* CI load/store variants */
3275 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
3276 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
3277 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
3278 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
3279 #endif
3280 
3281 /***                              Integer store                            ***/
3282 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
3283 static void glue(gen_, name##x)(DisasContext *ctx)                            \
3284 {                                                                             \
3285     TCGv EA;                                                                  \
3286     chk(ctx);                                                                 \
3287     gen_set_access_type(ctx, ACCESS_INT);                                     \
3288     EA = tcg_temp_new();                                                      \
3289     gen_addr_reg_index(ctx, EA);                                              \
3290     gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
3291 }
3292 #define GEN_STX(name, stop, opc2, opc3, type)                                 \
3293     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
3294 
3295 #define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
3296     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
3297 
3298 #define GEN_STEPX(name, stop, opc2, opc3)                                     \
3299 static void glue(gen_, name##epx)(DisasContext *ctx)                          \
3300 {                                                                             \
3301     TCGv EA;                                                                  \
3302     CHK_SV(ctx);                                                              \
3303     gen_set_access_type(ctx, ACCESS_INT);                                     \
3304     EA = tcg_temp_new();                                                      \
3305     gen_addr_reg_index(ctx, EA);                                              \
3306     tcg_gen_qemu_st_tl(                                                       \
3307         cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
3308 }
3309 
3310 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
3311 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
3312 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
3313 #if defined(TARGET_PPC64)
3314 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
3315 #endif
3316 
3317 #if defined(TARGET_PPC64)
3318 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
3319 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
3320 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
3321 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
3322 #endif
3323 /***                Integer load and store with byte reverse               ***/
3324 
3325 /* lhbrx */
3326 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3327 
3328 /* lwbrx */
3329 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
3330 
3331 #if defined(TARGET_PPC64)
3332 /* ldbrx */
3333 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
3334 /* stdbrx */
3335 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
3336 #endif  /* TARGET_PPC64 */
3337 
3338 /* sthbrx */
3339 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
3340 /* stwbrx */
3341 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3342 
3343 /***                    Integer load and store multiple                    ***/
3344 
3345 /* lmw */
3346 static void gen_lmw(DisasContext *ctx)
3347 {
3348     TCGv t0;
3349     TCGv_i32 t1;
3350 
3351     if (ctx->le_mode) {
3352         gen_align_no_le(ctx);
3353         return;
3354     }
3355     gen_set_access_type(ctx, ACCESS_INT);
3356     t0 = tcg_temp_new();
3357     t1 = tcg_constant_i32(rD(ctx->opcode));
3358     gen_addr_imm_index(ctx, t0, 0);
3359     gen_helper_lmw(cpu_env, t0, t1);
3360 }
3361 
3362 /* stmw */
3363 static void gen_stmw(DisasContext *ctx)
3364 {
3365     TCGv t0;
3366     TCGv_i32 t1;
3367 
3368     if (ctx->le_mode) {
3369         gen_align_no_le(ctx);
3370         return;
3371     }
3372     gen_set_access_type(ctx, ACCESS_INT);
3373     t0 = tcg_temp_new();
3374     t1 = tcg_constant_i32(rS(ctx->opcode));
3375     gen_addr_imm_index(ctx, t0, 0);
3376     gen_helper_stmw(cpu_env, t0, t1);
3377 }
3378 
3379 /***                    Integer load and store strings                     ***/
3380 
3381 /* lswi */
3382 /*
3383  * The PowerPC32 specification says we must generate an exception if rA
3384  * is in the range of registers to be loaded.  On the other hand, IBM says
3385  * this is valid, but rA won't be loaded.  For now, I'll follow the
3386  * spec...
3387  */
3388 static void gen_lswi(DisasContext *ctx)
3389 {
3390     TCGv t0;
3391     TCGv_i32 t1, t2;
3392     int nb = NB(ctx->opcode);
3393     int start = rD(ctx->opcode);
3394     int ra = rA(ctx->opcode);
3395     int nr;
3396 
3397     if (ctx->le_mode) {
3398         gen_align_no_le(ctx);
3399         return;
3400     }
3401     if (nb == 0) {
3402         nb = 32;
3403     }
3404     nr = DIV_ROUND_UP(nb, 4);
3405     if (unlikely(lsw_reg_in_range(start, nr, ra))) {
3406         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
3407         return;
3408     }
3409     gen_set_access_type(ctx, ACCESS_INT);
3410     t0 = tcg_temp_new();
3411     gen_addr_register(ctx, t0);
3412     t1 = tcg_constant_i32(nb);
3413     t2 = tcg_constant_i32(start);
3414     gen_helper_lsw(cpu_env, t0, t1, t2);
3415 }
3416 
3417 /* lswx */
3418 static void gen_lswx(DisasContext *ctx)
3419 {
3420     TCGv t0;
3421     TCGv_i32 t1, t2, t3;
3422 
3423     if (ctx->le_mode) {
3424         gen_align_no_le(ctx);
3425         return;
3426     }
3427     gen_set_access_type(ctx, ACCESS_INT);
3428     t0 = tcg_temp_new();
3429     gen_addr_reg_index(ctx, t0);
3430     t1 = tcg_constant_i32(rD(ctx->opcode));
3431     t2 = tcg_constant_i32(rA(ctx->opcode));
3432     t3 = tcg_constant_i32(rB(ctx->opcode));
3433     gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3434 }
3435 
3436 /* stswi */
3437 static void gen_stswi(DisasContext *ctx)
3438 {
3439     TCGv t0;
3440     TCGv_i32 t1, t2;
3441     int nb = NB(ctx->opcode);
3442 
3443     if (ctx->le_mode) {
3444         gen_align_no_le(ctx);
3445         return;
3446     }
3447     gen_set_access_type(ctx, ACCESS_INT);
3448     t0 = tcg_temp_new();
3449     gen_addr_register(ctx, t0);
3450     if (nb == 0) {
3451         nb = 32;
3452     }
3453     t1 = tcg_constant_i32(nb);
3454     t2 = tcg_constant_i32(rS(ctx->opcode));
3455     gen_helper_stsw(cpu_env, t0, t1, t2);
3456 }
3457 
3458 /* stswx */
3459 static void gen_stswx(DisasContext *ctx)
3460 {
3461     TCGv t0;
3462     TCGv_i32 t1, t2;
3463 
3464     if (ctx->le_mode) {
3465         gen_align_no_le(ctx);
3466         return;
3467     }
3468     gen_set_access_type(ctx, ACCESS_INT);
3469     t0 = tcg_temp_new();
3470     gen_addr_reg_index(ctx, t0);
3471     t1 = tcg_temp_new_i32();
3472     tcg_gen_trunc_tl_i32(t1, cpu_xer);
3473     tcg_gen_andi_i32(t1, t1, 0x7F);
3474     t2 = tcg_constant_i32(rS(ctx->opcode));
3475     gen_helper_stsw(cpu_env, t0, t1, t2);
3476 }
3477 
3478 /***                        Memory synchronisation                         ***/
3479 /* eieio */
3480 static void gen_eieio(DisasContext *ctx)
3481 {
3482     TCGBar bar = TCG_MO_ALL;
3483 
3484     /*
3485      * eieio has complex semantics. It provides memory ordering between
3486      * operations in the set:
3487      * - loads from CI memory.
3488      * - stores to CI memory.
3489      * - stores to WT memory.
3490      *
3491      * It separately also orders memory for operations in the set:
3492      * - stores to cacheable memory.
3493      *
3494      * It also serializes instructions:
3495      * - dcbt and dcbst.
3496      *
3497      * It separately serializes:
3498      * - tlbie and tlbsync.
3499      *
3500      * And separately serializes:
3501      * - slbieg, slbiag, and slbsync.
3502      *
3503      * The end result is that CI memory ordering requires TCG_MO_ALL
3504      * and it is not possible to special-case more relaxed ordering for
3505      * cacheable accesses. TCG_BAR_SC is required to provide this
3506      * serialization.
3507      */
3508 
3509     /*
3510      * POWER9 has an eieio instruction variant using bit 6 as a hint to
3511      * tell the CPU it is a store-forwarding barrier.
3512      */
3513     if (ctx->opcode & 0x2000000) {
3514         /*
3515          * ISA says that "Reserved fields in instructions are ignored
3516          * by the processor". So ignore the bit 6 on non-POWER9 CPU but
3517          * as this is not an instruction software should be using,
3518          * complain to the user.
3519          */
3520         if (!(ctx->insns_flags2 & PPC2_ISA300)) {
3521             qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
3522                           TARGET_FMT_lx "\n", ctx->cia);
3523         } else {
3524             bar = TCG_MO_ST_LD;
3525         }
3526     }
3527 
3528     tcg_gen_mb(bar | TCG_BAR_SC);
3529 }
3530 
3531 #if !defined(CONFIG_USER_ONLY)
3532 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
3533 {
3534     TCGv_i32 t;
3535     TCGLabel *l;
3536 
3537     if (!ctx->lazy_tlb_flush) {
3538         return;
3539     }
3540     l = gen_new_label();
3541     t = tcg_temp_new_i32();
3542     tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
3543     tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
3544     if (global) {
3545         gen_helper_check_tlb_flush_global(cpu_env);
3546     } else {
3547         gen_helper_check_tlb_flush_local(cpu_env);
3548     }
3549     gen_set_label(l);
3550 }
3551 #else
3552 static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
3553 #endif
3554 
3555 /* isync */
3556 static void gen_isync(DisasContext *ctx)
3557 {
3558     /*
3559      * We need to check for a pending TLB flush. This can only happen in
3560      * kernel mode, however, so check MSR_PR.
3561      */
3562     if (!ctx->pr) {
3563         gen_check_tlb_flush(ctx, false);
3564     }
3565     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
3566     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
3567 }
3568 
3569 #define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))
3570 
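/*
 * Load-and-reserve: record the reservation address, length and loaded
 * value so that a later store-conditional can check them.
 */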
3571 static void gen_load_locked(DisasContext *ctx, MemOp memop)
3572 {
3573     TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3574     TCGv t0 = tcg_temp_new();
3575 
3576     gen_set_access_type(ctx, ACCESS_RES);
3577     gen_addr_reg_index(ctx, t0);
3578     tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
3579     tcg_gen_mov_tl(cpu_reserve, t0);
3580     tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
3581     tcg_gen_mov_tl(cpu_reserve_val, gpr);
3582 }
3583 
3584 #define LARX(name, memop)                  \
3585 static void gen_##name(DisasContext *ctx)  \
3586 {                                          \
3587     gen_load_locked(ctx, memop);           \
3588 }
3589 
3590 /* lwarx */
3591 LARX(lbarx, DEF_MEMOP(MO_UB))
3592 LARX(lharx, DEF_MEMOP(MO_UW))
3593 LARX(lwarx, DEF_MEMOP(MO_UL))
3594 
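/*
 * Helper for the "fetch and increment/decrement bounded" and "fetch and
 * increment equal" forms of l{w,d}at: load mem(EA) and mem(EA + size),
 * conditionally store the adjusted value back to mem(EA), and return in
 * RT either the original value or the 1 << (size * 8 - 1) sentinel.
 */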
3595 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
3596                                       TCGv EA, TCGCond cond, int addend)
3597 {
3598     TCGv t = tcg_temp_new();
3599     TCGv t2 = tcg_temp_new();
3600     TCGv u = tcg_temp_new();
3601 
3602     tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
3603     tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
3604     tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
3605     tcg_gen_addi_tl(u, t, addend);
3606 
3607     /* E.g. for fetch and increment bounded... */
3608     /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
3609     tcg_gen_movcond_tl(cond, u, t, t2, u, t);
3610     tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
3611 
3612     /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
3613     tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
3614     tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
3615 }
3616 
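/*
 * Atomic load-and-update (lwat/ldat): the FC field selects the
 * read-modify-write operation; reserved encodings raise a DSI.  Variants
 * that cannot be expressed as a single TCG atomic operation are restarted
 * under the exclusive lock when translating for parallel execution.
 */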
3617 static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
3618 {
3619     uint32_t gpr_FC = FC(ctx->opcode);
3620     TCGv EA = tcg_temp_new();
3621     int rt = rD(ctx->opcode);
3622     bool need_serial;
3623     TCGv src, dst;
3624 
3625     gen_addr_register(ctx, EA);
3626     dst = cpu_gpr[rt];
3627     src = cpu_gpr[(rt + 1) & 31];
3628 
3629     need_serial = false;
3630     memop |= MO_ALIGN;
3631     switch (gpr_FC) {
3632     case 0: /* Fetch and add */
3633         tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
3634         break;
3635     case 1: /* Fetch and xor */
3636         tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
3637         break;
3638     case 2: /* Fetch and or */
3639         tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
3640         break;
3641     case 3: /* Fetch and 'and' */
3642         tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
3643         break;
3644     case 4:  /* Fetch and max unsigned */
3645         tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
3646         break;
3647     case 5:  /* Fetch and max signed */
3648         tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
3649         break;
3650     case 6:  /* Fetch and min unsigned */
3651         tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
3652         break;
3653     case 7:  /* Fetch and min signed */
3654         tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
3655         break;
3656     case 8: /* Swap */
3657         tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
3658         break;
3659 
3660     case 16: /* Compare and swap not equal */
3661         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3662             need_serial = true;
3663         } else {
3664             TCGv t0 = tcg_temp_new();
3665             TCGv t1 = tcg_temp_new();
3666 
3667             tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
3668             if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
3669                 tcg_gen_mov_tl(t1, src);
3670             } else {
3671                 tcg_gen_ext32u_tl(t1, src);
3672             }
3673             tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
3674                                cpu_gpr[(rt + 2) & 31], t0);
3675             tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
3676             tcg_gen_mov_tl(dst, t0);
3677         }
3678         break;
3679 
3680     case 24: /* Fetch and increment bounded */
3681         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3682             need_serial = true;
3683         } else {
3684             gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
3685         }
3686         break;
3687     case 25: /* Fetch and increment equal */
3688         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3689             need_serial = true;
3690         } else {
3691             gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
3692         }
3693         break;
3694     case 28: /* Fetch and decrement bounded */
3695         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3696             need_serial = true;
3697         } else {
3698             gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
3699         }
3700         break;
3701 
3702     default:
3703         /* invoke data storage error handler */
3704         gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
3705     }
3706 
3707     if (need_serial) {
3708         /* Restart with exclusive lock.  */
3709         gen_helper_exit_atomic(cpu_env);
3710         ctx->base.is_jmp = DISAS_NORETURN;
3711     }
3712 }
3713 
3714 static void gen_lwat(DisasContext *ctx)
3715 {
3716     gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
3717 }
3718 
3719 #ifdef TARGET_PPC64
3720 static void gen_ldat(DisasContext *ctx)
3721 {
3722     gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
3723 }
3724 #endif
3725 
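/*
 * Atomic store-and-update (stwat/stdat): same FC dispatch as
 * gen_ld_atomic, but the fetched value is discarded and only memory is
 * updated.
 */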
3726 static void gen_st_atomic(DisasContext *ctx, MemOp memop)
3727 {
3728     uint32_t gpr_FC = FC(ctx->opcode);
3729     TCGv EA = tcg_temp_new();
3730     TCGv src, discard;
3731 
3732     gen_addr_register(ctx, EA);
3733     src = cpu_gpr[rD(ctx->opcode)];
3734     discard = tcg_temp_new();
3735 
3736     memop |= MO_ALIGN;
3737     switch (gpr_FC) {
3738     case 0: /* add and Store */
3739         tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3740         break;
3741     case 1: /* xor and Store */
3742         tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3743         break;
3744     case 2: /* Or and Store */
3745         tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3746         break;
3747     case 3: /* 'and' and Store */
3748         tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3749         break;
3750     case 4:  /* Store max unsigned */
3751         tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3752         break;
3753     case 5:  /* Store max signed */
3754         tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3755         break;
3756     case 6:  /* Store min unsigned */
3757         tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3758         break;
3759     case 7:  /* Store min signed */
3760         tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
3761         break;
3762     case 24: /* Store twin  */
3763         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
3764             /* Restart with exclusive lock.  */
3765             gen_helper_exit_atomic(cpu_env);
3766             ctx->base.is_jmp = DISAS_NORETURN;
3767         } else {
3768             TCGv t = tcg_temp_new();
3769             TCGv t2 = tcg_temp_new();
3770             TCGv s = tcg_temp_new();
3771             TCGv s2 = tcg_temp_new();
3772             TCGv ea_plus_s = tcg_temp_new();
3773 
3774             tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
3775             tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
3776             tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
3777             tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
3778             tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
3779             tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
3780             tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
3781         }
3782         break;
3783     default:
3784         /* invoke data storage error handler */
3785         gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
3786     }
3787 }
3788 
3789 static void gen_stwat(DisasContext *ctx)
3790 {
3791     gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
3792 }
3793 
3794 #ifdef TARGET_PPC64
3795 static void gen_stdat(DisasContext *ctx)
3796 {
3797     gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
3798 }
3799 #endif
3800 
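/*
 * Store-conditional: the store is performed only if EA and the access
 * size match the recorded reservation and memory still holds
 * cpu_reserve_val.  CR0 receives SO plus EQ on success, and the
 * reservation is always cleared.
 */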
3801 static void gen_conditional_store(DisasContext *ctx, MemOp memop)
3802 {
3803     TCGLabel *lfail;
3804     TCGv EA;
3805     TCGv cr0;
3806     TCGv t0;
3807     int rs = rS(ctx->opcode);
3808 
3809     lfail = gen_new_label();
3810     EA = tcg_temp_new();
3811     cr0 = tcg_temp_new();
3812     t0 = tcg_temp_new();
3813 
3814     tcg_gen_mov_tl(cr0, cpu_so);
3815     gen_set_access_type(ctx, ACCESS_RES);
3816     gen_addr_reg_index(ctx, EA);
3817     tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
3818     tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);
3819 
3820     tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
3821                               cpu_gpr[rs], ctx->mem_idx,
3822                               DEF_MEMOP(memop) | MO_ALIGN);
3823     tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
3824     tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
3825     tcg_gen_or_tl(cr0, cr0, t0);
3826 
3827     gen_set_label(lfail);
3828     tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
3829     tcg_gen_movi_tl(cpu_reserve, -1);
3830 }
3831 
3832 #define STCX(name, memop)                  \
3833 static void gen_##name(DisasContext *ctx)  \
3834 {                                          \
3835     gen_conditional_store(ctx, memop);     \
3836 }
3837 
3838 STCX(stbcx_, DEF_MEMOP(MO_UB))
3839 STCX(sthcx_, DEF_MEMOP(MO_UW))
3840 STCX(stwcx_, DEF_MEMOP(MO_UL))
3841 
3842 #if defined(TARGET_PPC64)
3843 /* ldarx */
3844 LARX(ldarx, DEF_MEMOP(MO_UQ))
3845 /* stdcx. */
3846 STCX(stdcx_, DEF_MEMOP(MO_UQ))
3847 
3848 /* lqarx */
3849 static void gen_lqarx(DisasContext *ctx)
3850 {
3851     int rd = rD(ctx->opcode);
3852     TCGv EA, hi, lo;
3853     TCGv_i128 t16;
3854 
3855     if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
3856                  (rd == rB(ctx->opcode)))) {
3857         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3858         return;
3859     }
3860 
3861     gen_set_access_type(ctx, ACCESS_RES);
3862     EA = tcg_temp_new();
3863     gen_addr_reg_index(ctx, EA);
3864 
3865     /* Note that the low part is always in RD+1, even in LE mode.  */
3866     lo = cpu_gpr[rd + 1];
3867     hi = cpu_gpr[rd];
3868 
3869     t16 = tcg_temp_new_i128();
3870     tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
3871     tcg_gen_extr_i128_i64(lo, hi, t16);
3872 
3873     tcg_gen_mov_tl(cpu_reserve, EA);
3874     tcg_gen_movi_tl(cpu_reserve_length, 16);
3875     tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
3876     tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
3877 }
3878 
3879 /* stqcx. */
3880 static void gen_stqcx_(DisasContext *ctx)
3881 {
3882     TCGLabel *lfail;
3883     TCGv EA, t0, t1;
3884     TCGv cr0;
3885     TCGv_i128 cmp, val;
3886     int rs = rS(ctx->opcode);
3887 
3888     if (unlikely(rs & 1)) {
3889         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
3890         return;
3891     }
3892 
3893     lfail = gen_new_label();
3894     EA = tcg_temp_new();
3895     cr0 = tcg_temp_new();
3896 
3897     tcg_gen_mov_tl(cr0, cpu_so);
3898     gen_set_access_type(ctx, ACCESS_RES);
3899     gen_addr_reg_index(ctx, EA);
3900     tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
3901     tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);
3902 
3903     cmp = tcg_temp_new_i128();
3904     val = tcg_temp_new_i128();
3905 
3906     tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);
3907 
3908     /* Note that the low part is always in RS+1, even in LE mode.  */
3909     tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);
3910 
3911     tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
3912                                 DEF_MEMOP(MO_128 | MO_ALIGN));
3913 
3914     t0 = tcg_temp_new();
3915     t1 = tcg_temp_new();
3916     tcg_gen_extr_i128_i64(t1, t0, val);
3917 
3918     tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
3919     tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
3920     tcg_gen_or_tl(t0, t0, t1);
3921 
3922     tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
3923     tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
3924     tcg_gen_or_tl(cr0, cr0, t0);
3925 
3926     gen_set_label(lfail);
3927     tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
3928     tcg_gen_movi_tl(cpu_reserve, -1);
3929 }
3930 #endif /* defined(TARGET_PPC64) */
3931 
3932 /* sync */
3933 static void gen_sync(DisasContext *ctx)
3934 {
3935     TCGBar bar = TCG_MO_ALL;
3936     uint32_t l = (ctx->opcode >> 21) & 3;
3937 
3938     if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
3939         bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
3940     }
3941 
3942     /*
3943      * We may need to check for a pending TLB flush.
3944      *
3945      * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
3946      *
3947      * Additionally, this can only happen in kernel mode, so check
3948      * MSR_PR as well.
3949      */
3950     if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
3951         gen_check_tlb_flush(ctx, true);
3952     }
3953 
3954     tcg_gen_mb(bar | TCG_BAR_SC);
3955 }
3956 
3957 /* wait */
3958 static void gen_wait(DisasContext *ctx)
3959 {
3960     uint32_t wc;
3961 
3962     if (ctx->insns_flags & PPC_WAIT) {
3963         /* v2.03-v2.07 define an older incompatible 'wait' encoding. */
3964 
3965         if (ctx->insns_flags2 & PPC2_PM_ISA206) {
3966             /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
3967             wc = WC(ctx->opcode);
3968         } else {
3969             wc = 0;
3970         }
3971 
3972     } else if (ctx->insns_flags2 & PPC2_ISA300) {
3973         /* v3.0 defines a new 'wait' encoding. */
3974         wc = WC(ctx->opcode);
3975         if (ctx->insns_flags2 & PPC2_ISA310) {
3976             uint32_t pl = PL(ctx->opcode);
3977 
3978             /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
3979             if (wc == 3) {
3980                 gen_invalid(ctx);
3981                 return;
3982             }
3983 
3984             /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
3985             if (pl > 0 && wc != 2) {
3986                 gen_invalid(ctx);
3987                 return;
3988             }
3989 
3990         } else { /* ISA300 */
3991             /* WC 1-3 are reserved */
3992             if (wc > 0) {
3993                 gen_invalid(ctx);
3994                 return;
3995             }
3996         }
3997 
3998     } else {
3999         warn_report("wait instruction decoded with wrong ISA flags.");
4000         gen_invalid(ctx);
4001         return;
4002     }
4003 
4004     /*
4005      * wait without WC field or with WC=0 waits for an exception / interrupt
4006      * to occur.
4007      */
4008     if (wc == 0) {
4009         TCGv_i32 t0 = tcg_constant_i32(1);
4010         tcg_gen_st_i32(t0, cpu_env,
4011                        -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
4012         /* Stop translation, as the CPU is supposed to sleep from now */
4013         gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4014     }
4015 
4016     /*
4017      * Other wait types must not just wait until an exception occurs because
4018      * ignoring their other wake-up conditions could cause a hang.
4019      *
4020      * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
4021      * no-ops.
4022      *
4023      * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
4024      *
4025      * wc=2 waits for an implementation-specific condition, which could be
4026      * always true, so it can be implemented as a no-op.
4027      *
4028      * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
4029      *
4030      * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
4031      * Reservation-loss may have implementation-specific conditions, so it
4032      * can be implemented as a no-op.
4033      *
4034      * wc=2 waits for an exception or an amount of time to pass. This
4035      * amount is implementation-specific so it can be implemented as a
4036      * no-op.
4037      *
4038      * ISA v3.1 allows for execution to resume "in the rare case of
4039      * an implementation-dependent event", so in any case software must
4040      * not depend on the architected resumption condition to become
4041      * true, so no-op implementations should be architecturally correct
4042      * (if suboptimal).
4043      */
4044 }
4045 
4046 #if defined(TARGET_PPC64)
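/*
 * Power-management instructions (doze/nap/stop/sleep/rvwinkle) are
 * hypervisor-privileged: they call the pminsn helper and end translation,
 * since the CPU is expected to go idle.
 */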
4047 static void gen_doze(DisasContext *ctx)
4048 {
4049 #if defined(CONFIG_USER_ONLY)
4050     GEN_PRIV(ctx);
4051 #else
4052     TCGv_i32 t;
4053 
4054     CHK_HV(ctx);
4055     translator_io_start(&ctx->base);
4056     t = tcg_constant_i32(PPC_PM_DOZE);
4057     gen_helper_pminsn(cpu_env, t);
4058     /* Stop translation, as the CPU is supposed to sleep from now */
4059     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4060 #endif /* defined(CONFIG_USER_ONLY) */
4061 }
4062 
4063 static void gen_nap(DisasContext *ctx)
4064 {
4065 #if defined(CONFIG_USER_ONLY)
4066     GEN_PRIV(ctx);
4067 #else
4068     TCGv_i32 t;
4069 
4070     CHK_HV(ctx);
4071     translator_io_start(&ctx->base);
4072     t = tcg_constant_i32(PPC_PM_NAP);
4073     gen_helper_pminsn(cpu_env, t);
4074     /* Stop translation, as the CPU is supposed to sleep from now */
4075     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4076 #endif /* defined(CONFIG_USER_ONLY) */
4077 }
4078 
4079 static void gen_stop(DisasContext *ctx)
4080 {
4081 #if defined(CONFIG_USER_ONLY)
4082     GEN_PRIV(ctx);
4083 #else
4084     TCGv_i32 t;
4085 
4086     CHK_HV(ctx);
4087     translator_io_start(&ctx->base);
4088     t = tcg_constant_i32(PPC_PM_STOP);
4089     gen_helper_pminsn(cpu_env, t);
4090     /* Stop translation, as the CPU is supposed to sleep from now */
4091     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4092 #endif /* defined(CONFIG_USER_ONLY) */
4093 }
4094 
4095 static void gen_sleep(DisasContext *ctx)
4096 {
4097 #if defined(CONFIG_USER_ONLY)
4098     GEN_PRIV(ctx);
4099 #else
4100     TCGv_i32 t;
4101 
4102     CHK_HV(ctx);
4103     translator_io_start(&ctx->base);
4104     t = tcg_constant_i32(PPC_PM_SLEEP);
4105     gen_helper_pminsn(cpu_env, t);
4106     /* Stop translation, as the CPU is supposed to sleep from now */
4107     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4108 #endif /* defined(CONFIG_USER_ONLY) */
4109 }
4110 
4111 static void gen_rvwinkle(DisasContext *ctx)
4112 {
4113 #if defined(CONFIG_USER_ONLY)
4114     GEN_PRIV(ctx);
4115 #else
4116     TCGv_i32 t;
4117 
4118     CHK_HV(ctx);
4119     translator_io_start(&ctx->base);
4120     t = tcg_constant_i32(PPC_PM_RVWINKLE);
4121     gen_helper_pminsn(cpu_env, t);
4122     /* Stop translation, as the CPU is supposed to sleep from now */
4123     gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
4124 #endif /* defined(CONFIG_USER_ONLY) */
4125 }
4126 #endif /* #if defined(TARGET_PPC64) */
4127 
4128 static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
4129 {
4130 #if defined(TARGET_PPC64)
4131     if (ctx->has_cfar) {
4132         tcg_gen_movi_tl(cpu_cfar, nip);
4133     }
4134 #endif
4135 }
4136 
4137 #if defined(TARGET_PPC64)
4138 static void pmu_count_insns(DisasContext *ctx)
4139 {
4140     /*
4141      * Do not bother calling the helper if the PMU isn't counting
4142      * instructions.
4143      */
4144     if (!ctx->pmu_insn_cnt) {
4145         return;
4146     }
4147 
4148  #if !defined(CONFIG_USER_ONLY)
4149     TCGLabel *l;
4150     TCGv t0;
4151 
4152     /*
4153      * The PMU insns_inc() helper stops the internal PMU timer if a
4154      * counter overflow happens. In that case, if the guest is
4155      * running with icount and we do not handle it beforehand,
4156      * the helper can trigger a 'bad icount read'.
4157      */
4158     translator_io_start(&ctx->base);
4159 
4160     /* Avoid helper calls when only PMC5-6 are enabled. */
4161     if (!ctx->pmc_other) {
4162         l = gen_new_label();
4163         t0 = tcg_temp_new();
4164 
4165         gen_load_spr(t0, SPR_POWER_PMC5);
4166         tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
4167         gen_store_spr(SPR_POWER_PMC5, t0);
4168         /* Check for overflow, if it's enabled */
4169         if (ctx->mmcr0_pmcjce) {
4170             tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
4171             gen_helper_handle_pmc5_overflow(cpu_env);
4172         }
4173 
4174         gen_set_label(l);
4175     } else {
4176         gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
4177     }
4178   #else
4179     /*
4180      * User mode can read (but not write) PMC5 and start/stop
4181      * the PMU via MMCR0_FC. In this case just increment
4182      * PMC5 with base.num_insns.
4183      */
4184     TCGv t0 = tcg_temp_new();
4185 
4186     gen_load_spr(t0, SPR_POWER_PMC5);
4187     tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
4188     gen_store_spr(SPR_POWER_PMC5, t0);
4189   #endif /* #if !defined(CONFIG_USER_ONLY) */
4190 }
4191 #else
4192 static void pmu_count_insns(DisasContext *ctx)
4193 {
4194     return;
4195 }
4196 #endif /* #if defined(TARGET_PPC64) */
4197 
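/*
 * Direct TB chaining is only used when not single-stepping;
 * translator_use_goto_tb() applies the generic restrictions (e.g. the
 * destination must lie on the same page as the current TB).
 */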
4198 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
4199 {
4200     if (unlikely(ctx->singlestep_enabled)) {
4201         return false;
4202     }
4203     return translator_use_goto_tb(&ctx->base, dest);
4204 }
4205 
4206 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4207 {
4208     if (unlikely(ctx->singlestep_enabled)) {
4209         gen_debug_exception(ctx, false);
4210     } else {
4211         /*
4212          * tcg_gen_lookup_and_goto_ptr will exit the TB if
4213          * CF_NO_GOTO_PTR is set. Count insns now.
4214          */
4215         if (tb_cflags(ctx->base.tb) & CF_NO_GOTO_PTR) {
4216             pmu_count_insns(ctx);
4217         }
4218 
4219         tcg_gen_lookup_and_goto_ptr();
4220     }
4221 }
4222 
4223 /***                                Branch                                 ***/
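/*
 * Branch to 'dest', chaining TBs directly when allowed and otherwise
 * exiting via lookup-and-goto-ptr.  PMU instruction counting must happen
 * before the TB is left.
 */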
4224 static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
4225 {
4226     if (NARROW_MODE(ctx)) {
4227         dest = (uint32_t) dest;
4228     }
4229     if (use_goto_tb(ctx, dest)) {
4230         pmu_count_insns(ctx);
4231         tcg_gen_goto_tb(n);
4232         tcg_gen_movi_tl(cpu_nip, dest & ~3);
4233         tcg_gen_exit_tb(ctx->base.tb, n);
4234     } else {
4235         tcg_gen_movi_tl(cpu_nip, dest & ~3);
4236         gen_lookup_and_goto_ptr(ctx);
4237     }
4238 }
4239 
4240 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4241 {
4242     if (NARROW_MODE(ctx)) {
4243         nip = (uint32_t)nip;
4244     }
4245     tcg_gen_movi_tl(cpu_lr, nip);
4246 }
4247 
4248 /* b ba bl bla */
4249 static void gen_b(DisasContext *ctx)
4250 {
4251     target_ulong li, target;
4252 
4253     /* sign extend LI */
4254     li = LI(ctx->opcode);
4255     li = (li ^ 0x02000000) - 0x02000000;
4256     if (likely(AA(ctx->opcode) == 0)) {
4257         target = ctx->cia + li;
4258     } else {
4259         target = li;
4260     }
4261     if (LK(ctx->opcode)) {
4262         gen_setlr(ctx, ctx->base.pc_next);
4263     }
4264     gen_update_cfar(ctx, ctx->cia);
4265     gen_goto_tb(ctx, 0, target);
4266     ctx->base.is_jmp = DISAS_NORETURN;
4267 }
4268 
4269 #define BCOND_IM  0
4270 #define BCOND_LR  1
4271 #define BCOND_CTR 2
4272 #define BCOND_TAR 3
4273 
4274 static void gen_bcond(DisasContext *ctx, int type)
4275 {
4276     uint32_t bo = BO(ctx->opcode);
4277     TCGLabel *l1;
4278     TCGv target;
4279 
4280     if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
4281         target = tcg_temp_new();
4282         if (type == BCOND_CTR) {
4283             tcg_gen_mov_tl(target, cpu_ctr);
4284         } else if (type == BCOND_TAR) {
4285             gen_load_spr(target, SPR_TAR);
4286         } else {
4287             tcg_gen_mov_tl(target, cpu_lr);
4288         }
4289     } else {
4290         target = NULL;
4291     }
4292     if (LK(ctx->opcode)) {
4293         gen_setlr(ctx, ctx->base.pc_next);
4294     }
4295     l1 = gen_new_label();
4296     if ((bo & 0x4) == 0) {
4297         /* Decrement and test CTR */
4298         TCGv temp = tcg_temp_new();
4299 
4300         if (type == BCOND_CTR) {
4301             /*
4302              * All ISAs up to v3 describe this form of bcctr as invalid, but
4303              * some processors, i.e. 64-bit server processors compliant with
4304              * arch 2.x, implement a "test and decrement" logic instead, as
4305              * described in their respective UMs. This logic requires CTR to
4306              * act as both the branch target and a counter, which makes it
4307              * basically useless and thus never used in real code.
4308              *
4309              * This form was hence chosen to trigger an extra micro-architectural
4310              * side-effect on real HW needed for the Spectre v2 workaround.
4311              * It is up to guests that implement such a workaround, i.e. Linux,
4312              * to use this form in a way that just triggers the side-effect
4313              * without doing anything else harmful.
4314              */
4315             if (unlikely(!is_book3s_arch2x(ctx))) {
4316                 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4317                 return;
4318             }
4319 
4320             if (NARROW_MODE(ctx)) {
4321                 tcg_gen_ext32u_tl(temp, cpu_ctr);
4322             } else {
4323                 tcg_gen_mov_tl(temp, cpu_ctr);
4324             }
4325             if (bo & 0x2) {
4326                 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
4327             } else {
4328                 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
4329             }
4330             tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
4331         } else {
4332             tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
4333             if (NARROW_MODE(ctx)) {
4334                 tcg_gen_ext32u_tl(temp, cpu_ctr);
4335             } else {
4336                 tcg_gen_mov_tl(temp, cpu_ctr);
4337             }
4338             if (bo & 0x2) {
4339                 tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
4340             } else {
4341                 tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
4342             }
4343         }
4344     }
4345     if ((bo & 0x10) == 0) {
4346         /* Test CR */
4347         uint32_t bi = BI(ctx->opcode);
4348         uint32_t mask = 0x08 >> (bi & 0x03);
4349         TCGv_i32 temp = tcg_temp_new_i32();
4350 
4351         if (bo & 0x8) {
4352             tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
4353             tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
4354         } else {
4355             tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
4356             tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
4357         }
4358     }
4359     gen_update_cfar(ctx, ctx->cia);
4360     if (type == BCOND_IM) {
4361         target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
4362         if (likely(AA(ctx->opcode) == 0)) {
4363             gen_goto_tb(ctx, 0, ctx->cia + li);
4364         } else {
4365             gen_goto_tb(ctx, 0, li);
4366         }
4367     } else {
4368         if (NARROW_MODE(ctx)) {
4369             tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
4370         } else {
4371             tcg_gen_andi_tl(cpu_nip, target, ~3);
4372         }
4373         gen_lookup_and_goto_ptr(ctx);
4374     }
4375     if ((bo & 0x14) != 0x14) {
4376         /* fallthrough case */
4377         gen_set_label(l1);
4378         gen_goto_tb(ctx, 1, ctx->base.pc_next);
4379     }
4380     ctx->base.is_jmp = DISAS_NORETURN;
4381 }
4382 
4383 static void gen_bc(DisasContext *ctx)
4384 {
4385     gen_bcond(ctx, BCOND_IM);
4386 }
4387 
4388 static void gen_bcctr(DisasContext *ctx)
4389 {
4390     gen_bcond(ctx, BCOND_CTR);
4391 }
4392 
4393 static void gen_bclr(DisasContext *ctx)
4394 {
4395     gen_bcond(ctx, BCOND_LR);
4396 }
4397 
4398 static void gen_bctar(DisasContext *ctx)
4399 {
4400     gen_bcond(ctx, BCOND_TAR);
4401 }
4402 
4403 /***                      Condition register logical                       ***/
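/*
 * Each CR logical op shifts the source bits crbA and crbB to the bit
 * position of crbD within its 4-bit CR field, applies the operation, and
 * merges the single result bit back into the destination field.
 */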
4404 #define GEN_CRLOGIC(name, tcg_op, opc)                                        \
4405 static void glue(gen_, name)(DisasContext *ctx)                               \
4406 {                                                                             \
4407     uint8_t bitmask;                                                          \
4408     int sh;                                                                   \
4409     TCGv_i32 t0, t1;                                                          \
4410     sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
4411     t0 = tcg_temp_new_i32();                                                  \
4412     if (sh > 0)                                                               \
4413         tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
4414     else if (sh < 0)                                                          \
4415         tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
4416     else                                                                      \
4417         tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
4418     t1 = tcg_temp_new_i32();                                                  \
4419     sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
4420     if (sh > 0)                                                               \
4421         tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
4422     else if (sh < 0)                                                          \
4423         tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
4424     else                                                                      \
4425         tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
4426     tcg_op(t0, t0, t1);                                                       \
4427     bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
4428     tcg_gen_andi_i32(t0, t0, bitmask);                                        \
4429     tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
4430     tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
4431 }
4432 
4433 /* crand */
4434 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
4435 /* crandc */
4436 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
4437 /* creqv */
4438 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
4439 /* crnand */
4440 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
4441 /* crnor */
4442 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
4443 /* cror */
4444 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
4445 /* crorc */
4446 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
4447 /* crxor */
4448 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4449 
4450 /* mcrf */
4451 static void gen_mcrf(DisasContext *ctx)
4452 {
4453     tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
4454 }
4455 
4456 /***                           System linkage                              ***/
4457 
4458 /* rfi (supervisor only) */
4459 static void gen_rfi(DisasContext *ctx)
4460 {
4461 #if defined(CONFIG_USER_ONLY)
4462     GEN_PRIV(ctx);
4463 #else
4464     /*
4465      * This instruction doesn't exist anymore on 64-bit server
4466      * processors compliant with arch 2.x
4467      */
4468     if (is_book3s_arch2x(ctx)) {
4469         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
4470         return;
4471     }
4472     /* Restore CPU state */
4473     CHK_SV(ctx);
4474     translator_io_start(&ctx->base);
4475     gen_update_cfar(ctx, ctx->cia);
4476     gen_helper_rfi(cpu_env);
4477     ctx->base.is_jmp = DISAS_EXIT;
4478 #endif
4479 }
4480 
4481 #if defined(TARGET_PPC64)
4482 static void gen_rfid(DisasContext *ctx)
4483 {
4484 #if defined(CONFIG_USER_ONLY)
4485     GEN_PRIV(ctx);
4486 #else
4487     /* Restore CPU state */
4488     CHK_SV(ctx);
4489     translator_io_start(&ctx->base);
4490     gen_update_cfar(ctx, ctx->cia);
4491     gen_helper_rfid(cpu_env);
4492     ctx->base.is_jmp = DISAS_EXIT;
4493 #endif
4494 }
4495 
4496 #if !defined(CONFIG_USER_ONLY)
4497 static void gen_rfscv(DisasContext *ctx)
4498 {
4499 #if defined(CONFIG_USER_ONLY)
4500     GEN_PRIV(ctx);
4501 #else
4502     /* Restore CPU state */
4503     CHK_SV(ctx);
4504     translator_io_start(&ctx->base);
4505     gen_update_cfar(ctx, ctx->cia);
4506     gen_helper_rfscv(cpu_env);
4507     ctx->base.is_jmp = DISAS_EXIT;
4508 #endif
4509 }
4510 #endif
4511 
4512 static void gen_hrfid(DisasContext *ctx)
4513 {
4514 #if defined(CONFIG_USER_ONLY)
4515     GEN_PRIV(ctx);
4516 #else
4517     /* Restore CPU state */
4518     CHK_HV(ctx);
4519     translator_io_start(&ctx->base);
4520     gen_helper_hrfid(cpu_env);
4521     ctx->base.is_jmp = DISAS_EXIT;
4522 #endif
4523 }
4524 #endif
4525 
4526 /* sc */
4527 #if defined(CONFIG_USER_ONLY)
4528 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4529 #else
4530 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4531 #endif
4532 static void gen_sc(DisasContext *ctx)
4533 {
4534     uint32_t lev;
4535 
4536     /*
4537      * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
4538      * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
4539      * for Ultravisor which TCG does not support, so just ignore the top 6.
4540      */
4541     lev = (ctx->opcode >> 5) & 0x1;
4542     gen_exception_err(ctx, POWERPC_SYSCALL, lev);
4543 }
4544 
4545 #if defined(TARGET_PPC64)
4546 #if !defined(CONFIG_USER_ONLY)
4547 static void gen_scv(DisasContext *ctx)
4548 {
4549     uint32_t lev = (ctx->opcode >> 5) & 0x7F;
4550 
4551     /* Set the PC back to the faulting instruction. */
4552     gen_update_nip(ctx, ctx->cia);
4553     gen_helper_scv(cpu_env, tcg_constant_i32(lev));
4554 
4555     ctx->base.is_jmp = DISAS_NORETURN;
4556 }
4557 #endif
4558 #endif
4559 
4560 /***                                Trap                                   ***/
4561 
4562 /* Check for unconditional traps (always or never) */
4563 static bool check_unconditional_trap(DisasContext *ctx)
4564 {
4565     /* Trap never */
4566     if (TO(ctx->opcode) == 0) {
4567         return true;
4568     }
4569     /* Trap always */
4570     if (TO(ctx->opcode) == 31) {
4571         gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4572         return true;
4573     }
4574     return false;
4575 }
4576 
4577 /* tw */
4578 static void gen_tw(DisasContext *ctx)
4579 {
4580     TCGv_i32 t0;
4581 
4582     if (check_unconditional_trap(ctx)) {
4583         return;
4584     }
4585     t0 = tcg_constant_i32(TO(ctx->opcode));
4586     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4587                   t0);
4588 }
4589 
4590 /* twi */
4591 static void gen_twi(DisasContext *ctx)
4592 {
4593     TCGv t0;
4594     TCGv_i32 t1;
4595 
4596     if (check_unconditional_trap(ctx)) {
4597         return;
4598     }
4599     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4600     t1 = tcg_constant_i32(TO(ctx->opcode));
4601     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4602 }
4603 
4604 #if defined(TARGET_PPC64)
4605 /* td */
4606 static void gen_td(DisasContext *ctx)
4607 {
4608     TCGv_i32 t0;
4609 
4610     if (check_unconditional_trap(ctx)) {
4611         return;
4612     }
4613     t0 = tcg_constant_i32(TO(ctx->opcode));
4614     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4615                   t0);
4616 }
4617 
4618 /* tdi */
4619 static void gen_tdi(DisasContext *ctx)
4620 {
4621     TCGv t0;
4622     TCGv_i32 t1;
4623 
4624     if (check_unconditional_trap(ctx)) {
4625         return;
4626     }
4627     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4628     t1 = tcg_constant_i32(TO(ctx->opcode));
4629     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4630 }
4631 #endif
4632 
4633 /***                          Processor control                            ***/
4634 
4635 /* mcrxr */
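/*
 * Copy XER[SO, OV, CA] into CR field crfD (packed as SO:OV:CA:0) and
 * clear those bits in XER.
 */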
4636 static void gen_mcrxr(DisasContext *ctx)
4637 {
4638     TCGv_i32 t0 = tcg_temp_new_i32();
4639     TCGv_i32 t1 = tcg_temp_new_i32();
4640     TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];
4641 
4642     tcg_gen_trunc_tl_i32(t0, cpu_so);
4643     tcg_gen_trunc_tl_i32(t1, cpu_ov);
4644     tcg_gen_trunc_tl_i32(dst, cpu_ca);
4645     tcg_gen_shli_i32(t0, t0, 3);
4646     tcg_gen_shli_i32(t1, t1, 2);
4647     tcg_gen_shli_i32(dst, dst, 1);
4648     tcg_gen_or_i32(dst, dst, t0);
4649     tcg_gen_or_i32(dst, dst, t1);
4650 
4651     tcg_gen_movi_tl(cpu_so, 0);
4652     tcg_gen_movi_tl(cpu_ov, 0);
4653     tcg_gen_movi_tl(cpu_ca, 0);
4654 }
4655 
4656 #ifdef TARGET_PPC64
4657 /* mcrxrx */
4658 static void gen_mcrxrx(DisasContext *ctx)
4659 {
4660     TCGv t0 = tcg_temp_new();
4661     TCGv t1 = tcg_temp_new();
4662     TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];
4663 
4664     /* copy OV and OV32 */
4665     tcg_gen_shli_tl(t0, cpu_ov, 1);
4666     tcg_gen_or_tl(t0, t0, cpu_ov32);
4667     tcg_gen_shli_tl(t0, t0, 2);
4668     /* copy CA and CA32 */
4669     tcg_gen_shli_tl(t1, cpu_ca, 1);
4670     tcg_gen_or_tl(t1, t1, cpu_ca32);
4671     tcg_gen_or_tl(t0, t0, t1);
4672     tcg_gen_trunc_tl_i32(dst, t0);
4673 }
4674 #endif
4675 
4676 /* mfcr mfocrf */
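/*
 * mfocrf (opcode bit 0x00100000 set) copies a single CR field to its
 * architected position in rD; plain mfcr assembles all eight fields,
 * with CR0 in the most significant nibble.
 */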
4677 static void gen_mfcr(DisasContext *ctx)
4678 {
4679     uint32_t crm, crn;
4680 
4681     if (likely(ctx->opcode & 0x00100000)) {
4682         crm = CRM(ctx->opcode);
4683         if (likely(crm && ((crm & (crm - 1)) == 0))) {
4684             crn = ctz32(crm);
4685             tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4686             tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4687                             cpu_gpr[rD(ctx->opcode)], crn * 4);
4688         }
4689     } else {
4690         TCGv_i32 t0 = tcg_temp_new_i32();
4691         tcg_gen_mov_i32(t0, cpu_crf[0]);
4692         tcg_gen_shli_i32(t0, t0, 4);
4693         tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4694         tcg_gen_shli_i32(t0, t0, 4);
4695         tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4696         tcg_gen_shli_i32(t0, t0, 4);
4697         tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4698         tcg_gen_shli_i32(t0, t0, 4);
4699         tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4700         tcg_gen_shli_i32(t0, t0, 4);
4701         tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4702         tcg_gen_shli_i32(t0, t0, 4);
4703         tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4704         tcg_gen_shli_i32(t0, t0, 4);
4705         tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4706         tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4707     }
4708 }
4709 
4710 /* mfmsr */
4711 static void gen_mfmsr(DisasContext *ctx)
4712 {
4713     CHK_SV(ctx);
4714     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
4715 }
4716 
4717 /* mfspr */
4718 static inline void gen_op_mfspr(DisasContext *ctx)
4719 {
4720     void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
4721     uint32_t sprn = SPR(ctx->opcode);
4722 
4723 #if defined(CONFIG_USER_ONLY)
4724     read_cb = ctx->spr_cb[sprn].uea_read;
4725 #else
4726     if (ctx->pr) {
4727         read_cb = ctx->spr_cb[sprn].uea_read;
4728     } else if (ctx->hv) {
4729         read_cb = ctx->spr_cb[sprn].hea_read;
4730     } else {
4731         read_cb = ctx->spr_cb[sprn].oea_read;
4732     }
4733 #endif
4734     if (likely(read_cb != NULL)) {
4735         if (likely(read_cb != SPR_NOACCESS)) {
4736             (*read_cb)(ctx, rD(ctx->opcode), sprn);
4737         } else {
4738             /* Privilege exception */
4739             /*
4740              * This is a hack to avoid warnings when running Linux:
4741              * this OS breaks the PowerPC virtualisation model,
4742              * allowing userland applications to read the PVR.
4743              */
4744             if (sprn != SPR_PVR) {
4745                 qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
4746                               "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
4747                               ctx->cia);
4748             }
4749             gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4750         }
4751     } else {
4752         /* ISA 2.07 defines these as no-ops */
4753         if ((ctx->insns_flags2 & PPC2_ISA207S) &&
4754             (sprn >= 808 && sprn <= 811)) {
4755             /* This is a nop */
4756             return;
4757         }
4758         /* Not defined */
4759         qemu_log_mask(LOG_GUEST_ERROR,
4760                       "Trying to read invalid spr %d (0x%03x) at "
4761                       TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);
4762 
4763         /*
4764          * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
4765          * generate a priv, an hv emu or a no-op
4766          */
4767         if (sprn & 0x10) {
4768             if (ctx->pr) {
4769                 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4770             }
4771         } else {
4772             if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
4773                 gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4774             }
4775         }
4776     }
4777 }
4778 
4779 static void gen_mfspr(DisasContext *ctx)
4780 {
4781     gen_op_mfspr(ctx);
4782 }
4783 
4784 /* mftb */
4785 static void gen_mftb(DisasContext *ctx)
4786 {
4787     gen_op_mfspr(ctx);
4788 }
4789 
4790 /* mtcrf mtocrf */
4791 static void gen_mtcrf(DisasContext *ctx)
4792 {
4793     uint32_t crm, crn;
4794 
4795     crm = CRM(ctx->opcode);
4796     if (likely((ctx->opcode & 0x00100000))) {
4797         if (crm && ((crm & (crm - 1)) == 0)) {
4798             TCGv_i32 temp = tcg_temp_new_i32();
4799             crn = ctz32(crm);
4800             tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4801             tcg_gen_shri_i32(temp, temp, crn * 4);
4802             tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4803         }
4804     } else {
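             /*
              * mtcrf: load each CR field selected by the CRM mask from
              * the matching 4-bit nibble of rS.
              */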
4805         TCGv_i32 temp = tcg_temp_new_i32();
4806         tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4807         for (crn = 0 ; crn < 8 ; crn++) {
4808             if (crm & (1 << crn)) {
4809                     tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4810                     tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4811             }
4812         }
4813     }
4814 }
4815 
4816 /* mtmsr */
4817 #if defined(TARGET_PPC64)
4818 static void gen_mtmsrd(DisasContext *ctx)
4819 {
4820     if (unlikely(!is_book3s_arch2x(ctx))) {
4821         gen_invalid(ctx);
4822         return;
4823     }
4824 
4825     CHK_SV(ctx);
4826 
4827 #if !defined(CONFIG_USER_ONLY)
4828     TCGv t0, t1;
4829     target_ulong mask;
4830 
4831     t0 = tcg_temp_new();
4832     t1 = tcg_temp_new();
4833 
4834     translator_io_start(&ctx->base);
4835 
4836     if (ctx->opcode & 0x00010000) {
4837         /* L=1 form only updates EE and RI */
4838         mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
4839     } else {
4840         /* mtmsrd does not alter HV, S, ME, or LE */
4841         mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
4842                  (1ULL << MSR_HV));
4843         /*
4844          * XXX: we need to update nip before the store if we enter
4845          *      power saving mode; we will exit the loop directly from
4846          *      ppc_store_msr
4847          */
4848         gen_update_nip(ctx, ctx->base.pc_next);
4849     }
4850 
4851     tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
4852     tcg_gen_andi_tl(t1, cpu_msr, ~mask);
4853     tcg_gen_or_tl(t0, t0, t1);
4854 
4855     gen_helper_store_msr(cpu_env, t0);
4856 
4857     /* Must stop the translation as machine state (may have) changed */
4858     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
4859 #endif /* !defined(CONFIG_USER_ONLY) */
4860 }
4861 #endif /* defined(TARGET_PPC64) */
4862 
4863 static void gen_mtmsr(DisasContext *ctx)
4864 {
4865     CHK_SV(ctx);
4866 
4867 #if !defined(CONFIG_USER_ONLY)
4868     TCGv t0, t1;
4869     target_ulong mask = 0xFFFFFFFF;
4870 
4871     t0 = tcg_temp_new();
4872     t1 = tcg_temp_new();
4873 
4874     translator_io_start(&ctx->base);
4875     if (ctx->opcode & 0x00010000) {
4876         /* L=1 form only updates EE and RI */
4877         mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
4878     } else {
4879         /* mtmsr does not alter S, ME, or LE */
4880         mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));
4881 
4882         /*
4883          * XXX: we need to update nip before the store if we enter
4884          *      power saving mode; we will exit the loop directly from
4885          *      ppc_store_msr
4886          */
4887         gen_update_nip(ctx, ctx->base.pc_next);
4888     }
4889 
4890     tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
4891     tcg_gen_andi_tl(t1, cpu_msr, ~mask);
4892     tcg_gen_or_tl(t0, t0, t1);
4893 
4894     gen_helper_store_msr(cpu_env, t0);
4895 
4896     /* Must stop the translation as machine state (may have) changed */
4897     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
4898 #endif
4899 }
4900 
4901 /* mtspr */
4902 static void gen_mtspr(DisasContext *ctx)
4903 {
4904     void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
4905     uint32_t sprn = SPR(ctx->opcode);
4906 
4907 #if defined(CONFIG_USER_ONLY)
4908     write_cb = ctx->spr_cb[sprn].uea_write;
4909 #else
4910     if (ctx->pr) {
4911         write_cb = ctx->spr_cb[sprn].uea_write;
4912     } else if (ctx->hv) {
4913         write_cb = ctx->spr_cb[sprn].hea_write;
4914     } else {
4915         write_cb = ctx->spr_cb[sprn].oea_write;
4916     }
4917 #endif
4918     if (likely(write_cb != NULL)) {
4919         if (likely(write_cb != SPR_NOACCESS)) {
4920             (*write_cb)(ctx, sprn, rS(ctx->opcode));
4921         } else {
4922             /* Privilege exception */
4923             qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
4924                           "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
4925                           ctx->cia);
4926             gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4927         }
4928     } else {
4929         /* ISA 2.07 defines these as no-ops */
4930         if ((ctx->insns_flags2 & PPC2_ISA207S) &&
4931             (sprn >= 808 && sprn <= 811)) {
4932             /* This is a nop */
4933             return;
4934         }
4935 
4936         /* Not defined */
4937         qemu_log_mask(LOG_GUEST_ERROR,
4938                       "Trying to write invalid spr %d (0x%03x) at "
4939                       TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);
4940 
4941 
4942         /*
4943          * The behaviour depends on MSR:PR and SPR# bit 0x10; it can
4944          * generate a priv, an hv emu or a no-op
4945          */
4946         if (sprn & 0x10) {
4947             if (ctx->pr) {
4948                 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4949             }
4950         } else {
4951             if (ctx->pr || sprn == 0) {
4952                 gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
4953             }
4954         }
4955     }
4956 }
4957 
4958 #if defined(TARGET_PPC64)
4959 /* setb */
4960 static void gen_setb(DisasContext *ctx)
4961 {
4962     TCGv_i32 t0 = tcg_temp_new_i32();
4963     TCGv_i32 t8 = tcg_constant_i32(8);
4964     TCGv_i32 tm1 = tcg_constant_i32(-1);
4965     int crf = crfS(ctx->opcode);
4966 
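         /*
          * cpu_crf[crf] holds the 4-bit CR field with LT = 8, GT = 4,
          * EQ = 2 and SO = 1: the result is -1 if LT is set, 1 if GT
          * is set, and 0 otherwise.
          */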
4967     tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
4968     tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
4969     tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4970 }
4971 #endif
4972 
4973 /***                         Cache management                              ***/
4974 
4975 /* dcbf */
4976 static void gen_dcbf(DisasContext *ctx)
4977 {
4978     /* XXX: specification says this is treated as a load by the MMU */
4979     TCGv t0;
4980     gen_set_access_type(ctx, ACCESS_CACHE);
4981     t0 = tcg_temp_new();
4982     gen_addr_reg_index(ctx, t0);
4983     gen_qemu_ld8u(ctx, t0, t0);
4984 }
4985 
4986 /* dcbfep (external PID dcbf) */
4987 static void gen_dcbfep(DisasContext *ctx)
4988 {
4989     /* XXX: specification says this is treated as a load by the MMU */
4990     TCGv t0;
4991     CHK_SV(ctx);
4992     gen_set_access_type(ctx, ACCESS_CACHE);
4993     t0 = tcg_temp_new();
4994     gen_addr_reg_index(ctx, t0);
4995     tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
4996 }
4997 
4998 /* dcbi (Supervisor only) */
4999 static void gen_dcbi(DisasContext *ctx)
5000 {
5001 #if defined(CONFIG_USER_ONLY)
5002     GEN_PRIV(ctx);
5003 #else
5004     TCGv EA, val;
5005 
5006     CHK_SV(ctx);
5007     EA = tcg_temp_new();
5008     gen_set_access_type(ctx, ACCESS_CACHE);
5009     gen_addr_reg_index(ctx, EA);
5010     val = tcg_temp_new();
5011     /* XXX: specification says this should be treated as a store by the MMU */
5012     gen_qemu_ld8u(ctx, val, EA);
5013     gen_qemu_st8(ctx, val, EA);
5014 #endif /* defined(CONFIG_USER_ONLY) */
5015 }
5016 
5017 /* dcbst */
5018 static void gen_dcbst(DisasContext *ctx)
5019 {
5020     /* XXX: specification says this is treated as a load by the MMU */
5021     TCGv t0;
5022     gen_set_access_type(ctx, ACCESS_CACHE);
5023     t0 = tcg_temp_new();
5024     gen_addr_reg_index(ctx, t0);
5025     gen_qemu_ld8u(ctx, t0, t0);
5026 }
5027 
5028 /* dcbstep (external PID dcbst) */
5029 static void gen_dcbstep(DisasContext *ctx)
5030 {
5031     /* XXX: specification says this is treated as a load by the MMU */
5032     TCGv t0;
5033     gen_set_access_type(ctx, ACCESS_CACHE);
5034     t0 = tcg_temp_new();
5035     gen_addr_reg_index(ctx, t0);
5036     tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
5037 }
5038 
5039 /* dcbt */
5040 static void gen_dcbt(DisasContext *ctx)
5041 {
5042     /*
5043      * interpreted as no-op
5044      * XXX: specification says this is treated as a load by the MMU but
5045      *      does not generate any exception
5046      */
5047 }
5048 
5049 /* dcbtep */
5050 static void gen_dcbtep(DisasContext *ctx)
5051 {
5052     /*
5053      * interpreted as no-op
5054      * XXX: specification says this is treated as a load by the MMU but
5055      *      does not generate any exception
5056      */
5057 }
5058 
5059 /* dcbtst */
5060 static void gen_dcbtst(DisasContext *ctx)
5061 {
5062     /*
5063      * interpreted as no-op
5064      * XXX: specification says this is treated as a load by the MMU but
5065      *      does not generate any exception
5066      */
5067 }
5068 
5069 /* dcbtstep */
5070 static void gen_dcbtstep(DisasContext *ctx)
5071 {
5072     /*
5073      * interpreted as no-op
5074      * XXX: specification says this is treated as a load by the MMU but
5075      *      does not generate any exception
5076      */
5077 }
5078 
5079 /* dcbtls */
5080 static void gen_dcbtls(DisasContext *ctx)
5081 {
5082     /* Always fails locking the cache */
5083     TCGv t0 = tcg_temp_new();
5084     gen_load_spr(t0, SPR_Exxx_L1CSR0);
5085     tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
5086     gen_store_spr(SPR_Exxx_L1CSR0, t0);
5087 }
5088 
5089 /* dcblc */
5090 static void gen_dcblc(DisasContext *ctx)
5091 {
5092     /*
5093      * interpreted as no-op
5094      */
5095 }
5096 
5097 /* dcbz */
5098 static void gen_dcbz(DisasContext *ctx)
5099 {
5100     TCGv tcgv_addr;
5101     TCGv_i32 tcgv_op;
5102 
5103     gen_set_access_type(ctx, ACCESS_CACHE);
5104     tcgv_addr = tcg_temp_new();
5105     tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
5106     gen_addr_reg_index(ctx, tcgv_addr);
5107     gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
5108 }
5109 
5110 /* dcbzep */
5111 static void gen_dcbzep(DisasContext *ctx)
5112 {
5113     TCGv tcgv_addr;
5114     TCGv_i32 tcgv_op;
5115 
5116     gen_set_access_type(ctx, ACCESS_CACHE);
5117     tcgv_addr = tcg_temp_new();
5118     tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
5119     gen_addr_reg_index(ctx, tcgv_addr);
5120     gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
5121 }
5122 
5123 /* dst / dstt */
5124 static void gen_dst(DisasContext *ctx)
5125 {
5126     if (rA(ctx->opcode) == 0) {
5127         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5128     } else {
5129         /* interpreted as no-op */
5130     }
5131 }
5132 
5133 /* dstst / dststt */
5134 static void gen_dstst(DisasContext *ctx)
5135 {
5136     if (rA(ctx->opcode) == 0) {
5137         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5138     } else {
5139         /* interpreted as no-op */
5140     }
5141 
5142 }
5143 
5144 /* dss / dssall */
5145 static void gen_dss(DisasContext *ctx)
5146 {
5147     /* interpreted as no-op */
5148 }
5149 
5150 /* icbi */
5151 static void gen_icbi(DisasContext *ctx)
5152 {
5153     TCGv t0;
5154     gen_set_access_type(ctx, ACCESS_CACHE);
5155     t0 = tcg_temp_new();
5156     gen_addr_reg_index(ctx, t0);
5157     gen_helper_icbi(cpu_env, t0);
5158 }
5159 
5160 /* icbiep */
5161 static void gen_icbiep(DisasContext *ctx)
5162 {
5163     TCGv t0;
5164     gen_set_access_type(ctx, ACCESS_CACHE);
5165     t0 = tcg_temp_new();
5166     gen_addr_reg_index(ctx, t0);
5167     gen_helper_icbiep(cpu_env, t0);
5168 }
5169 
5170 /* Optional: */
5171 /* dcba */
5172 static void gen_dcba(DisasContext *ctx)
5173 {
5174     /*
5175      * interpreted as no-op
5176      * XXX: specification says this is treated as a store by the MMU
5177      *      but does not generate any exception
5178      */
5179 }
5180 
5181 /***                    Segment register manipulation                      ***/
5182 /* Supervisor only: */
5183 
5184 /* mfsr */
5185 static void gen_mfsr(DisasContext *ctx)
5186 {
5187 #if defined(CONFIG_USER_ONLY)
5188     GEN_PRIV(ctx);
5189 #else
5190     TCGv t0;
5191 
5192     CHK_SV(ctx);
5193     t0 = tcg_constant_tl(SR(ctx->opcode));
5194     gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5195 #endif /* defined(CONFIG_USER_ONLY) */
5196 }
5197 
5198 /* mfsrin */
5199 static void gen_mfsrin(DisasContext *ctx)
5200 {
5201 #if defined(CONFIG_USER_ONLY)
5202     GEN_PRIV(ctx);
5203 #else
5204     TCGv t0;
5205 
5206     CHK_SV(ctx);
5207     t0 = tcg_temp_new();
5208     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5209     gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5210 #endif /* defined(CONFIG_USER_ONLY) */
5211 }
5212 
5213 /* mtsr */
5214 static void gen_mtsr(DisasContext *ctx)
5215 {
5216 #if defined(CONFIG_USER_ONLY)
5217     GEN_PRIV(ctx);
5218 #else
5219     TCGv t0;
5220 
5221     CHK_SV(ctx);
5222     t0 = tcg_constant_tl(SR(ctx->opcode));
5223     gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
5224 #endif /* defined(CONFIG_USER_ONLY) */
5225 }
5226 
5227 /* mtsrin */
5228 static void gen_mtsrin(DisasContext *ctx)
5229 {
5230 #if defined(CONFIG_USER_ONLY)
5231     GEN_PRIV(ctx);
5232 #else
5233     TCGv t0;
5234     CHK_SV(ctx);
5235 
5236     t0 = tcg_temp_new();
5237     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5238     gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
5239 #endif /* defined(CONFIG_USER_ONLY) */
5240 }
5241 
5242 #if defined(TARGET_PPC64)
5243 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5244 
5245 /* mfsr */
5246 static void gen_mfsr_64b(DisasContext *ctx)
5247 {
5248 #if defined(CONFIG_USER_ONLY)
5249     GEN_PRIV(ctx);
5250 #else
5251     TCGv t0;
5252 
5253     CHK_SV(ctx);
5254     t0 = tcg_constant_tl(SR(ctx->opcode));
5255     gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5256 #endif /* defined(CONFIG_USER_ONLY) */
5257 }
5258 
5259 /* mfsrin */
5260 static void gen_mfsrin_64b(DisasContext *ctx)
5261 {
5262 #if defined(CONFIG_USER_ONLY)
5263     GEN_PRIV(ctx);
5264 #else
5265     TCGv t0;
5266 
5267     CHK_SV(ctx);
5268     t0 = tcg_temp_new();
5269     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5270     gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5271 #endif /* defined(CONFIG_USER_ONLY) */
5272 }
5273 
5274 /* mtsr */
5275 static void gen_mtsr_64b(DisasContext *ctx)
5276 {
5277 #if defined(CONFIG_USER_ONLY)
5278     GEN_PRIV(ctx);
5279 #else
5280     TCGv t0;
5281 
5282     CHK_SV(ctx);
5283     t0 = tcg_constant_tl(SR(ctx->opcode));
5284     gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
5285 #endif /* defined(CONFIG_USER_ONLY) */
5286 }
5287 
5288 /* mtsrin */
5289 static void gen_mtsrin_64b(DisasContext *ctx)
5290 {
5291 #if defined(CONFIG_USER_ONLY)
5292     GEN_PRIV(ctx);
5293 #else
5294     TCGv t0;
5295 
5296     CHK_SV(ctx);
5297     t0 = tcg_temp_new();
5298     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5299     gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
5300 #endif /* defined(CONFIG_USER_ONLY) */
5301 }
5302 
5303 #endif /* defined(TARGET_PPC64) */
5304 
5305 /***                      Lookaside buffer management                      ***/
5306 /* Optional & supervisor only: */
5307 
5308 /* tlbia */
5309 static void gen_tlbia(DisasContext *ctx)
5310 {
5311 #if defined(CONFIG_USER_ONLY)
5312     GEN_PRIV(ctx);
5313 #else
5314     CHK_HV(ctx);
5315 
5316     gen_helper_tlbia(cpu_env);
5317 #endif  /* defined(CONFIG_USER_ONLY) */
5318 }
5319 
5320 /* tlbsync */
5321 static void gen_tlbsync(DisasContext *ctx)
5322 {
5323 #if defined(CONFIG_USER_ONLY)
5324     GEN_PRIV(ctx);
5325 #else
5326 
5327     if (ctx->gtse) {
5328         CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
5329     } else {
5330         CHK_HV(ctx); /* Else hypervisor privileged */
5331     }
5332 
5333     /* BookS does both ptesync and tlbsync, so tlbsync is a nop for server */
5334     if (ctx->insns_flags & PPC_BOOKE) {
5335         gen_check_tlb_flush(ctx, true);
5336     }
5337 #endif /* defined(CONFIG_USER_ONLY) */
5338 }
5339 
5340 /***                              External control                         ***/
5341 /* Optional: */
5342 
5343 /* eciwx */
5344 static void gen_eciwx(DisasContext *ctx)
5345 {
5346     TCGv t0;
5347     /* Should check EAR[E] ! */
5348     gen_set_access_type(ctx, ACCESS_EXT);
5349     t0 = tcg_temp_new();
5350     gen_addr_reg_index(ctx, t0);
5351     tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5352                        DEF_MEMOP(MO_UL | MO_ALIGN));
5353 }
5354 
5355 /* ecowx */
5356 static void gen_ecowx(DisasContext *ctx)
5357 {
5358     TCGv t0;
5359     /* Should check EAR[E] ! */
5360     gen_set_access_type(ctx, ACCESS_EXT);
5361     t0 = tcg_temp_new();
5362     gen_addr_reg_index(ctx, t0);
5363     tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5364                        DEF_MEMOP(MO_UL | MO_ALIGN));
5365 }
5366 
5367 /* 602 - 603 - G2 TLB management */
5368 
5369 /* tlbld */
5370 static void gen_tlbld_6xx(DisasContext *ctx)
5371 {
5372 #if defined(CONFIG_USER_ONLY)
5373     GEN_PRIV(ctx);
5374 #else
5375     CHK_SV(ctx);
5376     gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5377 #endif /* defined(CONFIG_USER_ONLY) */
5378 }
5379 
5380 /* tlbli */
5381 static void gen_tlbli_6xx(DisasContext *ctx)
5382 {
5383 #if defined(CONFIG_USER_ONLY)
5384     GEN_PRIV(ctx);
5385 #else
5386     CHK_SV(ctx);
5387     gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5388 #endif /* defined(CONFIG_USER_ONLY) */
5389 }
5390 
5391 /* BookE specific instructions */
5392 
5393 /* XXX: not implemented on 440 ? */
5394 static void gen_mfapidi(DisasContext *ctx)
5395 {
5396     /* XXX: TODO */
5397     gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5398 }
5399 
5400 /* XXX: not implemented on 440 ? */
5401 static void gen_tlbiva(DisasContext *ctx)
5402 {
5403 #if defined(CONFIG_USER_ONLY)
5404     GEN_PRIV(ctx);
5405 #else
5406     TCGv t0;
5407 
5408     CHK_SV(ctx);
5409     t0 = tcg_temp_new();
5410     gen_addr_reg_index(ctx, t0);
5411     gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5412 #endif /* defined(CONFIG_USER_ONLY) */
5413 }
5414 
5415 /* All 405 MAC instructions are translated here */
5416 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5417                                         int ra, int rb, int rt, int Rc)
5418 {
5419     TCGv t0, t1;
5420 
5421     t0 = tcg_temp_new();
5422     t1 = tcg_temp_new();
5423 
5424     switch (opc3 & 0x0D) {
5425     case 0x05:
5426         /* macchw    - macchw.    - macchwo   - macchwo.   */
5427         /* macchws   - macchws.   - macchwso  - macchwso.  */
5428         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
5429         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
5430         /* mulchw - mulchw. */
5431         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5432         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5433         tcg_gen_ext16s_tl(t1, t1);
5434         break;
5435     case 0x04:
5436         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
5437         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
5438         /* mulchwu - mulchwu. */
5439         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5440         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5441         tcg_gen_ext16u_tl(t1, t1);
5442         break;
5443     case 0x01:
5444         /* machhw    - machhw.    - machhwo   - machhwo.   */
5445         /* machhws   - machhws.   - machhwso  - machhwso.  */
5446         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
5447         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
5448         /* mulhhw - mulhhw. */
5449         tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5450         tcg_gen_ext16s_tl(t0, t0);
5451         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5452         tcg_gen_ext16s_tl(t1, t1);
5453         break;
5454     case 0x00:
5455         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
5456         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
5457         /* mulhhwu - mulhhwu. */
5458         tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5459         tcg_gen_ext16u_tl(t0, t0);
5460         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5461         tcg_gen_ext16u_tl(t1, t1);
5462         break;
5463     case 0x0D:
5464         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
5465         /* maclhws   - maclhws.   - maclhwso  - maclhwso.  */
5466         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
5467         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
5468         /* mullhw - mullhw. */
5469         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5470         tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5471         break;
5472     case 0x0C:
5473         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
5474         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
5475         /* mullhwu - mullhwu. */
5476         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5477         tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5478         break;
5479     }
5480     if (opc2 & 0x04) {
5481         /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5482         tcg_gen_mul_tl(t1, t0, t1);
5483         if (opc2 & 0x02) {
5484             /* nmultiply-and-accumulate (0x0E) */
5485             tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5486         } else {
5487             /* multiply-and-accumulate (0x0C) */
5488             tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5489         }
5490 
5491         if (opc3 & 0x12) {
5492             /* Check overflow and/or saturate */
5493             TCGLabel *l1 = gen_new_label();
5494 
5495             if (opc3 & 0x10) {
5496                 /* Start with XER OV disabled, the most likely case */
5497                 tcg_gen_movi_tl(cpu_ov, 0);
5498             }
5499             if (opc3 & 0x01) {
5500                 /* Signed */
5501                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5502                 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5503                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5504                 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5505                 if (opc3 & 0x02) {
5506                     /* Saturate */
5507                     tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5508                     tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5509                 }
5510             } else {
5511                 /* Unsigned */
5512                 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5513                 if (opc3 & 0x02) {
5514                     /* Saturate */
5515                     tcg_gen_movi_tl(t0, UINT32_MAX);
5516                 }
5517             }
5518             if (opc3 & 0x10) {
5519                 /* Overflow: set XER[OV] and XER[SO] */
5520                 tcg_gen_movi_tl(cpu_ov, 1);
5521                 tcg_gen_movi_tl(cpu_so, 1);
5522             }
5523             gen_set_label(l1);
5524             tcg_gen_mov_tl(cpu_gpr[rt], t0);
5525         }
5526     } else {
5527         tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5528     }
5529     if (unlikely(Rc != 0)) {
5530         /* Update Rc0 */
5531         gen_set_Rc0(ctx, cpu_gpr[rt]);
5532     }
5533 }
5534 
5535 #define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
5536 static void glue(gen_, name)(DisasContext *ctx)                               \
5537 {                                                                             \
5538     gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
5539                          rD(ctx->opcode), Rc(ctx->opcode));                   \
5540 }
5541 
5542 /* macchw    - macchw.    */
5543 GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
5544 /* macchwo   - macchwo.   */
5545 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
5546 /* macchws   - macchws.   */
5547 GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
5548 /* macchwso  - macchwso.  */
5549 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
5550 /* macchwsu  - macchwsu.  */
5551 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
5552 /* macchwsuo - macchwsuo. */
5553 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
5554 /* macchwu   - macchwu.   */
5555 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
5556 /* macchwuo  - macchwuo.  */
5557 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
5558 /* machhw    - machhw.    */
5559 GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
5560 /* machhwo   - machhwo.   */
5561 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
5562 /* machhws   - machhws.   */
5563 GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
5564 /* machhwso  - machhwso.  */
5565 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
5566 /* machhwsu  - machhwsu.  */
5567 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
5568 /* machhwsuo - machhwsuo. */
5569 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
5570 /* machhwu   - machhwu.   */
5571 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
5572 /* machhwuo  - machhwuo.  */
5573 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
5574 /* maclhw    - maclhw.    */
5575 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
5576 /* maclhwo   - maclhwo.   */
5577 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
5578 /* maclhws   - maclhws.   */
5579 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
5580 /* maclhwso  - maclhwso.  */
5581 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
5582 /* maclhwu   - maclhwu.   */
5583 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
5584 /* maclhwuo  - maclhwuo.  */
5585 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
5586 /* maclhwsu  - maclhwsu.  */
5587 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
5588 /* maclhwsuo - maclhwsuo. */
5589 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
5590 /* nmacchw   - nmacchw.   */
5591 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
5592 /* nmacchwo  - nmacchwo.  */
5593 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
5594 /* nmacchws  - nmacchws.  */
5595 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
5596 /* nmacchwso - nmacchwso. */
5597 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
5598 /* nmachhw   - nmachhw.   */
5599 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
5600 /* nmachhwo  - nmachhwo.  */
5601 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
5602 /* nmachhws  - nmachhws.  */
5603 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
5604 /* nmachhwso - nmachhwso. */
5605 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
5606 /* nmaclhw   - nmaclhw.   */
5607 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
5608 /* nmaclhwo  - nmaclhwo.  */
5609 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
5610 /* nmaclhws  - nmaclhws.  */
5611 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
5612 /* nmaclhwso - nmaclhwso. */
5613 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);
5614 
5615 /* mulchw  - mulchw.  */
5616 GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
5617 /* mulchwu - mulchwu. */
5618 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
5619 /* mulhhw  - mulhhw.  */
5620 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
5621 /* mulhhwu - mulhhwu. */
5622 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
5623 /* mullhw  - mullhw.  */
5624 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
5625 /* mullhwu - mullhwu. */
5626 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5627 
5628 /* mfdcr */
5629 static void gen_mfdcr(DisasContext *ctx)
5630 {
5631 #if defined(CONFIG_USER_ONLY)
5632     GEN_PRIV(ctx);
5633 #else
5634     TCGv dcrn;
5635 
5636     CHK_SV(ctx);
5637     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5638     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
5639 #endif /* defined(CONFIG_USER_ONLY) */
5640 }
5641 
5642 /* mtdcr */
5643 static void gen_mtdcr(DisasContext *ctx)
5644 {
5645 #if defined(CONFIG_USER_ONLY)
5646     GEN_PRIV(ctx);
5647 #else
5648     TCGv dcrn;
5649 
5650     CHK_SV(ctx);
5651     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5652     gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
5653 #endif /* defined(CONFIG_USER_ONLY) */
5654 }
5655 
5656 /* mfdcrx */
5657 /* XXX: not implemented on 440 ? */
5658 static void gen_mfdcrx(DisasContext *ctx)
5659 {
5660 #if defined(CONFIG_USER_ONLY)
5661     GEN_PRIV(ctx);
5662 #else
5663     CHK_SV(ctx);
5664     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
5665                         cpu_gpr[rA(ctx->opcode)]);
5666     /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5667 #endif /* defined(CONFIG_USER_ONLY) */
5668 }
5669 
5670 /* mtdcrx */
5671 /* XXX: not implemented on 440 ? */
5672 static void gen_mtdcrx(DisasContext *ctx)
5673 {
5674 #if defined(CONFIG_USER_ONLY)
5675     GEN_PRIV(ctx);
5676 #else
5677     CHK_SV(ctx);
5678     gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
5679                          cpu_gpr[rS(ctx->opcode)]);
5680     /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
5681 #endif /* defined(CONFIG_USER_ONLY) */
5682 }
5683 
5684 /* dccci */
5685 static void gen_dccci(DisasContext *ctx)
5686 {
5687     CHK_SV(ctx);
5688     /* interpreted as no-op */
5689 }
5690 
5691 /* dcread */
5692 static void gen_dcread(DisasContext *ctx)
5693 {
5694 #if defined(CONFIG_USER_ONLY)
5695     GEN_PRIV(ctx);
5696 #else
5697     TCGv EA, val;
5698 
5699     CHK_SV(ctx);
5700     gen_set_access_type(ctx, ACCESS_CACHE);
5701     EA = tcg_temp_new();
5702     gen_addr_reg_index(ctx, EA);
5703     val = tcg_temp_new();
5704     gen_qemu_ld32u(ctx, val, EA);
5705     tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
5706 #endif /* defined(CONFIG_USER_ONLY) */
5707 }
5708 
5709 /* icbt */
5710 static void gen_icbt_40x(DisasContext *ctx)
5711 {
5712     /*
5713      * interpreted as no-op
5714      * XXX: specification says this is treated as a load by the MMU but
5715      *      does not generate any exception
5716      */
5717 }
5718 
5719 /* iccci */
5720 static void gen_iccci(DisasContext *ctx)
5721 {
5722     CHK_SV(ctx);
5723     /* interpreted as no-op */
5724 }
5725 
5726 /* icread */
5727 static void gen_icread(DisasContext *ctx)
5728 {
5729     CHK_SV(ctx);
5730     /* interpreted as no-op */
5731 }
5732 
5733 /* rfci (supervisor only) */
5734 static void gen_rfci_40x(DisasContext *ctx)
5735 {
5736 #if defined(CONFIG_USER_ONLY)
5737     GEN_PRIV(ctx);
5738 #else
5739     CHK_SV(ctx);
5740     /* Restore CPU state */
5741     gen_helper_40x_rfci(cpu_env);
5742     ctx->base.is_jmp = DISAS_EXIT;
5743 #endif /* defined(CONFIG_USER_ONLY) */
5744 }
5745 
5746 static void gen_rfci(DisasContext *ctx)
5747 {
5748 #if defined(CONFIG_USER_ONLY)
5749     GEN_PRIV(ctx);
5750 #else
5751     CHK_SV(ctx);
5752     /* Restore CPU state */
5753     gen_helper_rfci(cpu_env);
5754     ctx->base.is_jmp = DISAS_EXIT;
5755 #endif /* defined(CONFIG_USER_ONLY) */
5756 }
5757 
5758 /* BookE specific */
5759 
5760 /* XXX: not implemented on 440 ? */
5761 static void gen_rfdi(DisasContext *ctx)
5762 {
5763 #if defined(CONFIG_USER_ONLY)
5764     GEN_PRIV(ctx);
5765 #else
5766     CHK_SV(ctx);
5767     /* Restore CPU state */
5768     gen_helper_rfdi(cpu_env);
5769     ctx->base.is_jmp = DISAS_EXIT;
5770 #endif /* defined(CONFIG_USER_ONLY) */
5771 }
5772 
5773 /* XXX: not implemented on 440 ? */
5774 static void gen_rfmci(DisasContext *ctx)
5775 {
5776 #if defined(CONFIG_USER_ONLY)
5777     GEN_PRIV(ctx);
5778 #else
5779     CHK_SV(ctx);
5780     /* Restore CPU state */
5781     gen_helper_rfmci(cpu_env);
5782     ctx->base.is_jmp = DISAS_EXIT;
5783 #endif /* defined(CONFIG_USER_ONLY) */
5784 }
5785 
5786 /* TLB management - PowerPC 405 implementation */
5787 
5788 /* tlbre */
5789 static void gen_tlbre_40x(DisasContext *ctx)
5790 {
5791 #if defined(CONFIG_USER_ONLY)
5792     GEN_PRIV(ctx);
5793 #else
5794     CHK_SV(ctx);
5795     switch (rB(ctx->opcode)) {
5796     case 0:
5797         gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
5798                                 cpu_gpr[rA(ctx->opcode)]);
5799         break;
5800     case 1:
5801         gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
5802                                 cpu_gpr[rA(ctx->opcode)]);
5803         break;
5804     default:
5805         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5806         break;
5807     }
5808 #endif /* defined(CONFIG_USER_ONLY) */
5809 }
5810 
5811 /* tlbsx - tlbsx. */
5812 static void gen_tlbsx_40x(DisasContext *ctx)
5813 {
5814 #if defined(CONFIG_USER_ONLY)
5815     GEN_PRIV(ctx);
5816 #else
5817     TCGv t0;
5818 
5819     CHK_SV(ctx);
5820     t0 = tcg_temp_new();
5821     gen_addr_reg_index(ctx, t0);
5822     gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5823     if (Rc(ctx->opcode)) {
5824         TCGLabel *l1 = gen_new_label();
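             /*
              * tlbsx.: CR0 = 0b00 || EQ || SO, where EQ is set unless
              * the helper returned -1 (no matching TLB entry).
              */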
5825         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5826         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5827         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5828         gen_set_label(l1);
5829     }
5830 #endif /* defined(CONFIG_USER_ONLY) */
5831 }
5832 
5833 /* tlbwe */
5834 static void gen_tlbwe_40x(DisasContext *ctx)
5835 {
5836 #if defined(CONFIG_USER_ONLY)
5837     GEN_PRIV(ctx);
5838 #else
5839     CHK_SV(ctx);
5840 
5841     switch (rB(ctx->opcode)) {
5842     case 0:
5843         gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
5844                                 cpu_gpr[rS(ctx->opcode)]);
5845         break;
5846     case 1:
5847         gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
5848                                 cpu_gpr[rS(ctx->opcode)]);
5849         break;
5850     default:
5851         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5852         break;
5853     }
5854 #endif /* defined(CONFIG_USER_ONLY) */
5855 }
5856 
5857 /* TLB management - PowerPC 440 implementation */
5858 
5859 /* tlbre */
5860 static void gen_tlbre_440(DisasContext *ctx)
5861 {
5862 #if defined(CONFIG_USER_ONLY)
5863     GEN_PRIV(ctx);
5864 #else
5865     CHK_SV(ctx);
5866 
5867     switch (rB(ctx->opcode)) {
5868     case 0:
5869     case 1:
5870     case 2:
5871         {
5872             TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
5873             gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
5874                                  t0, cpu_gpr[rA(ctx->opcode)]);
5875         }
5876         break;
5877     default:
5878         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5879         break;
5880     }
5881 #endif /* defined(CONFIG_USER_ONLY) */
5882 }
5883 
5884 /* tlbsx - tlbsx. */
5885 static void gen_tlbsx_440(DisasContext *ctx)
5886 {
5887 #if defined(CONFIG_USER_ONLY)
5888     GEN_PRIV(ctx);
5889 #else
5890     TCGv t0;
5891 
5892     CHK_SV(ctx);
5893     t0 = tcg_temp_new();
5894     gen_addr_reg_index(ctx, t0);
5895     gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
5896     if (Rc(ctx->opcode)) {
5897         TCGLabel *l1 = gen_new_label();
5898         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5899         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5900         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5901         gen_set_label(l1);
5902     }
5903 #endif /* defined(CONFIG_USER_ONLY) */
5904 }
5905 
5906 /* tlbwe */
5907 static void gen_tlbwe_440(DisasContext *ctx)
5908 {
5909 #if defined(CONFIG_USER_ONLY)
5910     GEN_PRIV(ctx);
5911 #else
5912     CHK_SV(ctx);
5913     switch (rB(ctx->opcode)) {
5914     case 0:
5915     case 1:
5916     case 2:
5917         {
5918             TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
5919             gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
5920                                  cpu_gpr[rS(ctx->opcode)]);
5921         }
5922         break;
5923     default:
5924         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5925         break;
5926     }
5927 #endif /* defined(CONFIG_USER_ONLY) */
5928 }
5929 
5930 /* TLB management - PowerPC BookE 2.06 implementation */
5931 
5932 /* tlbre */
5933 static void gen_tlbre_booke206(DisasContext *ctx)
5934 {
5935 #if defined(CONFIG_USER_ONLY)
5936     GEN_PRIV(ctx);
5937 #else
5938     CHK_SV(ctx);
5939     gen_helper_booke206_tlbre(cpu_env);
5940 #endif /* defined(CONFIG_USER_ONLY) */
5941 }
5942 
5943 /* tlbsx - tlbsx. */
5944 static void gen_tlbsx_booke206(DisasContext *ctx)
5945 {
5946 #if defined(CONFIG_USER_ONLY)
5947     GEN_PRIV(ctx);
5948 #else
5949     TCGv t0;
5950 
5951     CHK_SV(ctx);
5952     if (rA(ctx->opcode)) {
5953         t0 = tcg_temp_new();
5954         tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
5955     } else {
5956         t0 = cpu_gpr[rB(ctx->opcode)];
5957     }
5958     gen_helper_booke206_tlbsx(cpu_env, t0);
5959 #endif /* defined(CONFIG_USER_ONLY) */
5960 }
5961 
5962 /* tlbwe */
5963 static void gen_tlbwe_booke206(DisasContext *ctx)
5964 {
5965 #if defined(CONFIG_USER_ONLY)
5966     GEN_PRIV(ctx);
5967 #else
5968     CHK_SV(ctx);
5969     gen_helper_booke206_tlbwe(cpu_env);
5970 #endif /* defined(CONFIG_USER_ONLY) */
5971 }
5972 
5973 static void gen_tlbivax_booke206(DisasContext *ctx)
5974 {
5975 #if defined(CONFIG_USER_ONLY)
5976     GEN_PRIV(ctx);
5977 #else
5978     TCGv t0;
5979 
5980     CHK_SV(ctx);
5981     t0 = tcg_temp_new();
5982     gen_addr_reg_index(ctx, t0);
5983     gen_helper_booke206_tlbivax(cpu_env, t0);
5984 #endif /* defined(CONFIG_USER_ONLY) */
5985 }
5986 
5987 static void gen_tlbilx_booke206(DisasContext *ctx)
5988 {
5989 #if defined(CONFIG_USER_ONLY)
5990     GEN_PRIV(ctx);
5991 #else
5992     TCGv t0;
5993 
5994     CHK_SV(ctx);
5995     t0 = tcg_temp_new();
5996     gen_addr_reg_index(ctx, t0);
5997 
5998     switch ((ctx->opcode >> 21) & 0x3) {
5999     case 0:
6000         gen_helper_booke206_tlbilx0(cpu_env, t0);
6001         break;
6002     case 1:
6003         gen_helper_booke206_tlbilx1(cpu_env, t0);
6004         break;
6005     case 3:
6006         gen_helper_booke206_tlbilx3(cpu_env, t0);
6007         break;
6008     default:
6009         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
6010         break;
6011     }
6012 #endif /* defined(CONFIG_USER_ONLY) */
6013 }
6014 
6015 /* wrtee */
6016 static void gen_wrtee(DisasContext *ctx)
6017 {
6018 #if defined(CONFIG_USER_ONLY)
6019     GEN_PRIV(ctx);
6020 #else
6021     TCGv t0;
6022 
6023     CHK_SV(ctx);
6024     t0 = tcg_temp_new();
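         /* Replace only MSR[EE] with the corresponding bit of the source GPR */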
6025     tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
6026     tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
6027     tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
6028     gen_ppc_maybe_interrupt(ctx);
6029     /*
6030      * Stop translation to have a chance to raise an exception if we
6031      * just set msr_ee to 1
6032      */
6033     ctx->base.is_jmp = DISAS_EXIT_UPDATE;
6034 #endif /* defined(CONFIG_USER_ONLY) */
6035 }
6036 
6037 /* wrteei */
6038 static void gen_wrteei(DisasContext *ctx)
6039 {
6040 #if defined(CONFIG_USER_ONLY)
6041     GEN_PRIV(ctx);
6042 #else
6043     CHK_SV(ctx);
6044     if (ctx->opcode & 0x00008000) {
6045         tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
6046         gen_ppc_maybe_interrupt(ctx);
6047         /* Stop translation to have a chance to raise an exception */
6048         ctx->base.is_jmp = DISAS_EXIT_UPDATE;
6049     } else {
6050         tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
6051     }
6052 #endif /* defined(CONFIG_USER_ONLY) */
6053 }
6054 
6055 /* PowerPC 440 specific instructions */
6056 
6057 /* dlmzb */
6058 static void gen_dlmzb(DisasContext *ctx)
6059 {
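         /*
          * dlmzb: Determine Leftmost Zero Byte in rS || rB; the helper
          * stores the byte count in rA and also updates CR0 when Rc is set.
          */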
6060     TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
6061     gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
6062                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
6063 }
6064 
6065 /* mbar replaces eieio on 440 */
6066 static void gen_mbar(DisasContext *ctx)
6067 {
6068     /* interpreted as no-op */
6069 }
6070 
6071 /* msync replaces sync on 440 */
6072 static void gen_msync_4xx(DisasContext *ctx)
6073 {
6074     /* Only e500 seems to treat reserved bits as invalid */
6075     if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
6076         (ctx->opcode & 0x03FFF801)) {
6077         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
6078     }
6079     /* otherwise interpreted as no-op */
6080 }
6081 
6082 /* icbt */
6083 static void gen_icbt_440(DisasContext *ctx)
6084 {
6085     /*
6086      * interpreted as no-op
6087      * XXX: specification says this is treated as a load by the MMU but
6088      *      does not generate any exception
6089      */
6090 }
6091 
6092 #if defined(TARGET_PPC64)
6093 static void gen_maddld(DisasContext *ctx)
6094 {
6095     TCGv_i64 t1 = tcg_temp_new_i64();
6096 
6097     tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
6098     tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
6099 }
6100 
6101 /* maddhd maddhdu */
6102 static void gen_maddhd_maddhdu(DisasContext *ctx)
6103 {
6104     TCGv_i64 lo = tcg_temp_new_i64();
6105     TCGv_i64 hi = tcg_temp_new_i64();
6106     TCGv_i64 t1 = tcg_temp_new_i64();
6107 
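         /*
          * The low opcode bit (extracted by Rc) selects the unsigned form,
          * maddhdu.  Form the 128-bit product of rA and rB, add rC
          * (sign- or zero-extended), and keep the high 64 bits in rD.
          */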
6108     if (Rc(ctx->opcode)) {
6109         tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
6110                           cpu_gpr[rB(ctx->opcode)]);
6111         tcg_gen_movi_i64(t1, 0);
6112     } else {
6113         tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
6114                           cpu_gpr[rB(ctx->opcode)]);
6115         tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
6116     }
6117     tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
6118                      cpu_gpr[rC(ctx->opcode)], t1);
6119 }
6120 #endif /* defined(TARGET_PPC64) */
6121 
6122 static void gen_tbegin(DisasContext *ctx)
6123 {
6124     if (unlikely(!ctx->tm_enabled)) {
6125         gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
6126         return;
6127     }
6128     gen_helper_tbegin(cpu_env);
6129 }
6130 
6131 #define GEN_TM_NOOP(name)                                      \
6132 static inline void gen_##name(DisasContext *ctx)               \
6133 {                                                              \
6134     if (unlikely(!ctx->tm_enabled)) {                          \
6135         gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
6136         return;                                                \
6137     }                                                          \
6138     /*                                                         \
6139      * Because tbegin always fails in QEMU, these user         \
6140      * space instructions all have a simple implementation:    \
6141      *                                                         \
6142      *     CR[0] = 0b0 || MSR[TS] || 0b0                       \
6143      *           = 0b0 || 0b00    || 0b0                       \
6144      */                                                        \
6145     tcg_gen_movi_i32(cpu_crf[0], 0);                           \
6146 }
6147 
6148 GEN_TM_NOOP(tend);
6149 GEN_TM_NOOP(tabort);
6150 GEN_TM_NOOP(tabortwc);
6151 GEN_TM_NOOP(tabortwci);
6152 GEN_TM_NOOP(tabortdc);
6153 GEN_TM_NOOP(tabortdci);
6154 GEN_TM_NOOP(tsr);
6155 
6156 static inline void gen_cp_abort(DisasContext *ctx)
6157 {
6158     /* Do Nothing */
6159 }
6160 
6161 #define GEN_CP_PASTE_NOOP(name)                           \
6162 static inline void gen_##name(DisasContext *ctx)          \
6163 {                                                         \
6164     /*                                                    \
6165      * Generate invalid exception until we have an        \
6166      * implementation of the copy paste facility          \
6167      */                                                   \
6168     gen_invalid(ctx);                                     \
6169 }
6170 
6171 GEN_CP_PASTE_NOOP(copy)
6172 GEN_CP_PASTE_NOOP(paste)
6173 
6174 static void gen_tcheck(DisasContext *ctx)
6175 {
6176     if (unlikely(!ctx->tm_enabled)) {
6177         gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
6178         return;
6179     }
6180     /*
6181      * Because tbegin always fails, the tcheck implementation is
6182      * simple:
6183      *
6184      * CR[CRF] = TDOOMED || MSR[TS] || 0b0
6185      *         = 0b1 || 0b00 || 0b0
6186      */
6187     tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
6188 }
6189 
6190 #if defined(CONFIG_USER_ONLY)
6191 #define GEN_TM_PRIV_NOOP(name)                                 \
6192 static inline void gen_##name(DisasContext *ctx)               \
6193 {                                                              \
6194     gen_priv_opc(ctx);                                         \
6195 }
6196 
6197 #else
6198 
6199 #define GEN_TM_PRIV_NOOP(name)                                 \
6200 static inline void gen_##name(DisasContext *ctx)               \
6201 {                                                              \
6202     CHK_SV(ctx);                                               \
6203     if (unlikely(!ctx->tm_enabled)) {                          \
6204         gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
6205         return;                                                \
6206     }                                                          \
6207     /*                                                         \
6208      * Because tbegin always fails, the implementation is      \
6209      * simple:                                                 \
6210      *                                                         \
6211      *   CR[0] = 0b0 || MSR[TS] || 0b0                         \
6212      *         = 0b0 || 0b00 || 0b0                            \
6213      */                                                        \
6214     tcg_gen_movi_i32(cpu_crf[0], 0);                           \
6215 }
6216 
6217 #endif
6218 
6219 GEN_TM_PRIV_NOOP(treclaim);
6220 GEN_TM_PRIV_NOOP(trechkpt);
6221 
6222 static inline void get_fpr(TCGv_i64 dst, int regno)
6223 {
6224     tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
6225 }
6226 
6227 static inline void set_fpr(int regno, TCGv_i64 src)
6228 {
6229     tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
6230     /*
6231      * Before PowerISA v3.1 the result of doubleword 1 of the VSR
6232      * corresponding to the target FPR was undefined. However,
6233      * most (if not all) real hardware was setting the result to 0.
6234      * Starting at ISA v3.1, the result for doubleword 1 is now defined
6235      * to be 0.
6236      */
6237     tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
6238 }
6239 
6240 static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
6241 {
6242     tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
6243 }
6244 
6245 static inline void set_avr64(int regno, TCGv_i64 src, bool high)
6246 {
6247     tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
6248 }
6249 
6250 /*
6251  * Helpers for decodetree used by !function for decoding arguments.
6252  */
6253 static int times_2(DisasContext *ctx, int x)
6254 {
6255     return x * 2;
6256 }
6257 
6258 static int times_4(DisasContext *ctx, int x)
6259 {
6260     return x * 4;
6261 }
6262 
6263 static int times_16(DisasContext *ctx, int x)
6264 {
6265     return x * 16;
6266 }
6267 
6268 static int64_t dw_compose_ea(DisasContext *ctx, int x)
6269 {
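         /* Compose a doubleword-aligned negative displacement: -512 + 8 * x */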
6270     return deposit64(0xfffffffffffffe00, 3, 6, x);
6271 }
6272 
6273 /*
6274  * Helpers for trans_* functions to check for specific insns flags.
6275  * Use token pasting to ensure that we use the proper flag with the
6276  * proper variable.
6277  */
6278 #define REQUIRE_INSNS_FLAGS(CTX, NAME) \
6279     do {                                                \
6280         if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
6281             return false;                               \
6282         }                                               \
6283     } while (0)
6284 
6285 #define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
6286     do {                                                \
6287         if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
6288             return false;                               \
6289         }                                               \
6290     } while (0)
6291 
6292 /* Then special-case the check for 64-bit so that we elide code for ppc32. */
6293 #if TARGET_LONG_BITS == 32
6294 # define REQUIRE_64BIT(CTX)  return false
6295 #else
6296 # define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
6297 #endif
6298 
6299 #define REQUIRE_VECTOR(CTX)                             \
6300     do {                                                \
6301         if (unlikely(!(CTX)->altivec_enabled)) {        \
6302             gen_exception((CTX), POWERPC_EXCP_VPU);     \
6303             return true;                                \
6304         }                                               \
6305     } while (0)
6306 
6307 #define REQUIRE_VSX(CTX)                                \
6308     do {                                                \
6309         if (unlikely(!(CTX)->vsx_enabled)) {            \
6310             gen_exception((CTX), POWERPC_EXCP_VSXU);    \
6311             return true;                                \
6312         }                                               \
6313     } while (0)
6314 
6315 #define REQUIRE_FPU(ctx)                                \
6316     do {                                                \
6317         if (unlikely(!(ctx)->fpu_enabled)) {            \
6318             gen_exception((ctx), POWERPC_EXCP_FPU);     \
6319             return true;                                \
6320         }                                               \
6321     } while (0)
6322 
6323 #if !defined(CONFIG_USER_ONLY)
6324 #define REQUIRE_SV(CTX)             \
6325     do {                            \
6326         if (unlikely((CTX)->pr)) {  \
6327             gen_priv_opc(CTX);      \
6328             return true;            \
6329         }                           \
6330     } while (0)
6331 
6332 #define REQUIRE_HV(CTX)                             \
6333     do {                                            \
6334         if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
6335             gen_priv_opc(CTX);                      \
6336             return true;                            \
6337         }                                           \
6338     } while (0)
6339 #else
6340 #define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
6341 #define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
6342 #endif
6343 
6344 /*
6345  * Helpers for implementing sets of trans_* functions.
6346  * Defer the implementation of NAME to FUNC, with optional extra arguments.
6347  */
6348 #define TRANS(NAME, FUNC, ...) \
6349     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6350     { return FUNC(ctx, a, __VA_ARGS__); }
6351 #define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
6352     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6353     {                                                          \
6354         REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
6355         return FUNC(ctx, a, __VA_ARGS__);                      \
6356     }
6357 #define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6358     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6359     {                                                          \
6360         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6361         return FUNC(ctx, a, __VA_ARGS__);                      \
6362     }
6363 
6364 #define TRANS64(NAME, FUNC, ...) \
6365     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6366     { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
6367 #define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6368     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6369     {                                                          \
6370         REQUIRE_64BIT(ctx);                                    \
6371         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6372         return FUNC(ctx, a, __VA_ARGS__);                      \
6373     }
6374 
6375 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6376 
6377 
6378 #include "decode-insn32.c.inc"
6379 #include "decode-insn64.c.inc"
6380 #include "power8-pmu-regs.c.inc"
6381 
6382 /*
6383  * Incorporate CIA into the constant when R=1.
6384  * Validate that when R=1, RA=0.
6385  */
6386 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
6387 {
6388     d->rt = a->rt;
6389     d->ra = a->ra;
6390     d->si = a->si;
6391     if (a->r) {
6392         if (unlikely(a->ra != 0)) {
6393             gen_invalid(ctx);
6394             return false;
6395         }
6396         d->si += ctx->cia;
6397     }
6398     return true;
6399 }
6400 
6401 #include "translate/fixedpoint-impl.c.inc"
6402 
6403 #include "translate/fp-impl.c.inc"
6404 
6405 #include "translate/vmx-impl.c.inc"
6406 
6407 #include "translate/vsx-impl.c.inc"
6408 
6409 #include "translate/dfp-impl.c.inc"
6410 
6411 #include "translate/spe-impl.c.inc"
6412 
6413 #include "translate/branch-impl.c.inc"
6414 
6415 #include "translate/processor-ctrl-impl.c.inc"
6416 
6417 #include "translate/storage-ctrl-impl.c.inc"
6418 
6419 /* Handles lfdp */
6420 static void gen_dform39(DisasContext *ctx)
6421 {
6422     if ((ctx->opcode & 0x3) == 0) {
6423         if (ctx->insns_flags2 & PPC2_ISA205) {
6424             return gen_lfdp(ctx);
6425         }
6426     }
6427     return gen_invalid(ctx);
6428 }
6429 
6430 /* Handles stfdp */
6431 static void gen_dform3D(DisasContext *ctx)
6432 {
6433     if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6434         /* stfdp */
6435         if (ctx->insns_flags2 & PPC2_ISA205) {
6436             return gen_stfdp(ctx);
6437         }
6438     }
6439     return gen_invalid(ctx);
6440 }
6441 
6442 #if defined(TARGET_PPC64)
6443 /* brd */
6444 static void gen_brd(DisasContext *ctx)
6445 {
6446     tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
6447 }
6448 
6449 /* brw */
6450 static void gen_brw(DisasContext *ctx)
6451 {
6452     tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
6453     tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);
6454 
6455 }
6456 
6457 /* brh */
6458 static void gen_brh(DisasContext *ctx)
6459 {
6460     TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
6461     TCGv_i64 t1 = tcg_temp_new_i64();
6462     TCGv_i64 t2 = tcg_temp_new_i64();
6463 
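         /* Swap the two bytes within each halfword using the low-byte mask */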
6464     tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
6465     tcg_gen_and_i64(t2, t1, mask);
6466     tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
6467     tcg_gen_shli_i64(t1, t1, 8);
6468     tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
6469 }
6470 #endif
6471 
6472 static opcode_t opcodes[] = {
6473 #if defined(TARGET_PPC64)
6474 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
6475 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
6476 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
6477 #endif
6478 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
6479 #if defined(TARGET_PPC64)
6480 GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
6481 #endif
6482 GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
6483 GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
6484 GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
6485 GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6486 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6487 GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
6488 GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
6489 GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
6490 GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
6491 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6492 #if defined(TARGET_PPC64)
6493 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
6494 #endif
6495 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
6496 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
6497 GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6498 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6499 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6500 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
6501 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
6502 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
6503 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6504 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
6505 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
6506 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
6507 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6508 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6509 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6510 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6511 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
6512 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
6513 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
6514 #if defined(TARGET_PPC64)
6515 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
6516 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
6517 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
6518 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
6519 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
6520 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
6521 #endif
6522 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6523 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6524 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6525 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
6526 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
6527 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
6528 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
6529 #if defined(TARGET_PPC64)
6530 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
6531 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
6532 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
6533 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
6534 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
6535 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
6536                PPC_NONE, PPC2_ISA300),
6537 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
6538                PPC_NONE, PPC2_ISA300),
6539 #endif
6540 /* handles lfdp, lxsd, lxssp */
6541 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6542 /* handles stfdp, stxsd, stxssp */
6543 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6544 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6545 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6546 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
6547 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
6548 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
6549 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
6550 GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
6551 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
6552 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6553 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6554 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
6555 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
6556 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
6557 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6558 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6559 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
6560 #if defined(TARGET_PPC64)
6561 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
6562 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
6563 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
6564 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
6565 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
6566 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
6567 #endif
6568 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
6569 /* ISA v3.0 changed the extended opcode from 62 to 30 */
6570 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
6571 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
6572 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6573 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6574 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
6575 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
6576 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
6577 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
6578 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
6579 #if defined(TARGET_PPC64)
6580 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
6581 #if !defined(CONFIG_USER_ONLY)
6582 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6583 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6584 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6585 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
6586 #endif
6587 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6588 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6589 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6590 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6591 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6592 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
6593 #endif
6594 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6595 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
6596 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
6597 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
6598 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6599 #if defined(TARGET_PPC64)
6600 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
6601 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
6602 #endif
6603 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
6604 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
6605 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
6606 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
6607 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
6608 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
6609 #if defined(TARGET_PPC64)
6610 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
6611 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
6612 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
6613 #endif
6614 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
6615 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
6616 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
6617 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6618 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
6619 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
6620 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6621 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
6622 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6623 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
6624 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6625 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6626 GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6627 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
6628 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6629 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
6630 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
6631 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
6632 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
6633 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6634 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
6635 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
6636 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
6637 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
6638 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
6639 #if defined(TARGET_PPC64)
6640 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
6641 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
6642              PPC_SEGMENT_64B),
6643 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
6644 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
6645              PPC_SEGMENT_64B),
6646 #endif
6647 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
6648 /*
6649  * XXX Those instructions will need to be handled differently for
6650  * different ISA versions
6651  */
6652 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
6653 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
6654 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
6655 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
6656 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
6657 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
6658 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
6659 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
6660 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
6661 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
6662 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
6663 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
6664 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
6665 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
6666 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
6667 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
6668 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
6669 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
6670 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
6671 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
6672 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
6673 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
6674 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
6675 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
6676 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
6677 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
6678 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
6679                PPC_NONE, PPC2_BOOKE206),
6680 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
6681                PPC_NONE, PPC2_BOOKE206),
6682 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
6683                PPC_NONE, PPC2_BOOKE206),
6684 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
6685                PPC_NONE, PPC2_BOOKE206),
6686 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
6687                PPC_NONE, PPC2_BOOKE206),
6688 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
6689 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
6690 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
6691 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
6692               PPC_BOOKE, PPC2_BOOKE206),
6693 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
6694 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
6695                PPC_BOOKE, PPC2_BOOKE206),
6696 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
6697              PPC_440_SPEC),
6698 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
6699 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
6700 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
6701 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
6702 #if defined(TARGET_PPC64)
6703 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
6704               PPC2_ISA300),
6705 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
6706 #endif
6707 
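/*
 * From here on, several GEN_* macros that earlier in this file expanded to
 * the functions implementing the instructions are #undef'ed and redefined
 * to expand to the corresponding GEN_HANDLER table entries instead, so each
 * instruction is listed once per role with the same macro arguments.
 */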
6708 #undef GEN_INT_ARITH_ADD
6709 #undef GEN_INT_ARITH_ADD_CONST
6710 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
6711 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
6712 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
6713                                 add_ca, compute_ca, compute_ov)               \
6714 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
6715 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
6716 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
6717 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
6718 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
6719 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
6720 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
6721 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
6722 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
6723 GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
6724 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
6725 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
6726 
6727 #undef GEN_INT_ARITH_DIVW
6728 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
6729 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
6730 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
6731 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
6732 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
6733 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
6734 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6735 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6736 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6737 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6738 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6739 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6740 
6741 #if defined(TARGET_PPC64)
6742 #undef GEN_INT_ARITH_DIVD
6743 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
6744 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6745 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
6746 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
6747 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
6748 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
6749 
6750 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6751 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6752 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6753 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6754 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6755 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6756 
6757 #undef GEN_INT_ARITH_MUL_HELPER
6758 #define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
6759 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6760 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
6761 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
6762 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
6763 #endif
6764 
6765 #undef GEN_INT_ARITH_SUBF
6766 #undef GEN_INT_ARITH_SUBF_CONST
6767 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
6768 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
6769 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
6770                                 add_ca, compute_ca, compute_ov)               \
6771 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
6772 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
6773 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
6774 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
6775 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
6776 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
6777 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
6778 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
6779 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
6780 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
6781 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
6782 
6783 #undef GEN_LOGICAL1
6784 #undef GEN_LOGICAL2
6785 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
6786 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
6787 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
6788 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
6789 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
6790 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
6791 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
6792 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
6793 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
6794 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
6795 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
6796 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
6797 #if defined(TARGET_PPC64)
6798 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
6799 #endif
6800 
6801 #if defined(TARGET_PPC64)
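/*
 * For the rld* encodings, operand bits (the high bit of SH in the
 * immediate forms and the high bit of the MB/ME mask) overlap the bits
 * used as the second-level table index, so each mnemonic is registered in
 * two (R2) or four (R4) adjacent slots; the handlers re-extract the real
 * operand fields from ctx->opcode.
 */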
6802 #undef GEN_PPC64_R2
6803 #undef GEN_PPC64_R4
6804 #define GEN_PPC64_R2(name, opc1, opc2)                                        \
6805 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6806 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
6807              PPC_64B)
6808 #define GEN_PPC64_R4(name, opc1, opc2)                                        \
6809 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
6810 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000,   \
6811              PPC_64B),                                                        \
6812 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
6813              PPC_64B),                                                        \
6814 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
6815              PPC_64B)
6816 GEN_PPC64_R4(rldicl, 0x1E, 0x00),
6817 GEN_PPC64_R4(rldicr, 0x1E, 0x02),
6818 GEN_PPC64_R4(rldic, 0x1E, 0x04),
6819 GEN_PPC64_R2(rldcl, 0x1E, 0x08),
6820 GEN_PPC64_R2(rldcr, 0x1E, 0x09),
6821 GEN_PPC64_R4(rldimi, 0x1E, 0x06),
6822 #endif
6823 
6824 #undef GEN_LDX_E
6825 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
6826 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
6827 
6828 #if defined(TARGET_PPC64)
6829 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
6830 
6831 /* HV/P7 and later only */
6832 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
6833 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
6834 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
6835 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
6836 #endif
6837 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
6838 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
6839 
6840 /* External PID based load */
6841 #undef GEN_LDEPX
6842 #define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
6843 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
6844               0x00000001, PPC_NONE, PPC2_BOOKE206),
6845 
6846 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
6847 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
6848 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
6849 #if defined(TARGET_PPC64)
6850 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
6851 #endif
6852 
6853 #undef GEN_STX_E
6854 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
6855 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
6856 
6857 #if defined(TARGET_PPC64)
6858 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
6859 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
6860 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
6861 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
6862 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
6863 #endif
6864 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
6865 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
6866 
6867 #undef GEN_STEPX
6868 #define GEN_STEPX(name, ldop, opc2, opc3)                                     \
6869 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
6870               0x00000001, PPC_NONE, PPC2_BOOKE206),
6871 
6872 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
6873 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
6874 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
6875 #if defined(TARGET_PPC64)
6876 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
6877 #endif
6878 
6879 #undef GEN_CRLOGIC
6880 #define GEN_CRLOGIC(name, tcg_op, opc)                                        \
6881 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
6882 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
6883 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
6884 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
6885 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
6886 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
6887 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
6888 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
6889 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
6890 
6891 #undef GEN_MAC_HANDLER
6892 #define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
6893 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
6894 GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
6895 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
6896 GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
6897 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
6898 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
6899 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
6900 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
6901 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
6902 GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
6903 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
6904 GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
6905 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
6906 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
6907 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
6908 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
6909 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
6910 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
6911 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
6912 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
6913 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
6914 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
6915 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
6916 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
6917 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
6918 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
6919 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
6920 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
6921 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
6922 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
6923 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
6924 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
6925 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
6926 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
6927 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
6928 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
6929 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
6930 GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
6931 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
6932 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
6933 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
6934 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
6935 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
6936 
6937 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
6938                PPC_NONE, PPC2_TM),
6939 GEN_HANDLER2_E(tend,   "tend",   0x1F, 0x0E, 0x15, 0x01FFF800, \
6940                PPC_NONE, PPC2_TM),
6941 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
6942                PPC_NONE, PPC2_TM),
6943 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
6944                PPC_NONE, PPC2_TM),
6945 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
6946                PPC_NONE, PPC2_TM),
6947 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
6948                PPC_NONE, PPC2_TM),
6949 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
6950                PPC_NONE, PPC2_TM),
6951 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
6952                PPC_NONE, PPC2_TM),
6953 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
6954                PPC_NONE, PPC2_TM),
6955 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
6956                PPC_NONE, PPC2_TM),
6957 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
6958                PPC_NONE, PPC2_TM),
6959 
6960 #include "translate/fp-ops.c.inc"
6961 
6962 #include "translate/vmx-ops.c.inc"
6963 
6964 #include "translate/vsx-ops.c.inc"
6965 
6966 #include "translate/spe-ops.c.inc"
6967 };
6968 
6969 /*****************************************************************************/
6970 /* Opcode types */
6971 enum {
6972     PPC_DIRECT   = 0, /* Opcode routine        */
6973     PPC_INDIRECT = 1, /* Indirect opcode table */
6974 };
6975 
6976 #define PPC_OPCODE_MASK 0x3
6977 
6978 static inline int is_indirect_opcode(void *handler)
6979 {
6980     return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
6981 }
6982 
6983 static inline opc_handler_t **ind_table(void *handler)
6984 {
6985     return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
6986 }
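/*
 * The dispatch tables use a simple tagged-pointer scheme: an entry is
 * either a direct opc_handler_t pointer or a pointer to a sub-table with
 * PPC_INDIRECT or'ed into its low bits (the tables come from g_new(), so
 * the two low bits are guaranteed to be clear).  create_new_table() below
 * stores
 *     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
 * and ind_table() masks the tag off again before the sub-table is used.
 */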
6987 
6988 /* Opcode table creation */
6990 static void fill_new_table(opc_handler_t **table, int len)
6991 {
6992     int i;
6993 
6994     for (i = 0; i < len; i++) {
6995         table[i] = &invalid_handler;
6996     }
6997 }
6998 
6999 static int create_new_table(opc_handler_t **table, unsigned char idx)
7000 {
7001     opc_handler_t **tmp;
7002 
7003     tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
7004     fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
7005     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
7006 
7007     return 0;
7008 }
7009 
7010 static int insert_in_table(opc_handler_t **table, unsigned char idx,
7011                             opc_handler_t *handler)
7012 {
7013     if (table[idx] != &invalid_handler) {
7014         return -1;
7015     }
7016     table[idx] = handler;
7017 
7018     return 0;
7019 }
7020 
7021 static int register_direct_insn(opc_handler_t **ppc_opcodes,
7022                                 unsigned char idx, opc_handler_t *handler)
7023 {
7024     if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
7025         printf("*** ERROR: opcode %02x already assigned in main "
7026                "opcode table\n", idx);
7027         return -1;
7028     }
7029 
7030     return 0;
7031 }
7032 
7033 static int register_ind_in_table(opc_handler_t **table,
7034                                  unsigned char idx1, unsigned char idx2,
7035                                  opc_handler_t *handler)
7036 {
7037     if (table[idx1] == &invalid_handler) {
7038         if (create_new_table(table, idx1) < 0) {
7039             printf("*** ERROR: unable to create indirect table "
7040                    "idx=%02x\n", idx1);
7041             return -1;
7042         }
7043     } else {
7044         if (!is_indirect_opcode(table[idx1])) {
7045             printf("*** ERROR: idx %02x already assigned to a direct "
7046                    "opcode\n", idx1);
7047             return -1;
7048         }
7049     }
7050     if (handler != NULL &&
7051         insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
7052         printf("*** ERROR: opcode %02x already assigned in "
7053                "opcode table %02x\n", idx2, idx1);
7054         return -1;
7055     }
7056 
7057     return 0;
7058 }
7059 
7060 static int register_ind_insn(opc_handler_t **ppc_opcodes,
7061                              unsigned char idx1, unsigned char idx2,
7062                              opc_handler_t *handler)
7063 {
7064     return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
7065 }
7066 
7067 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
7068                                 unsigned char idx1, unsigned char idx2,
7069                                 unsigned char idx3, opc_handler_t *handler)
7070 {
7071     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7072         printf("*** ERROR: unable to join indirect table idx "
7073                "[%02x-%02x]\n", idx1, idx2);
7074         return -1;
7075     }
7076     if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7077                               handler) < 0) {
7078         printf("*** ERROR: unable to insert opcode "
7079                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7080         return -1;
7081     }
7082 
7083     return 0;
7084 }
7085 
7086 static int register_trplind_insn(opc_handler_t **ppc_opcodes,
7087                                  unsigned char idx1, unsigned char idx2,
7088                                  unsigned char idx3, unsigned char idx4,
7089                                  opc_handler_t *handler)
7090 {
7091     opc_handler_t **table;
7092 
7093     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7094         printf("*** ERROR: unable to join indirect table idx "
7095                "[%02x-%02x]\n", idx1, idx2);
7096         return -1;
7097     }
7098     table = ind_table(ppc_opcodes[idx1]);
7099     if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
7100         printf("*** ERROR: unable to join 2nd-level indirect table idx "
7101                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7102         return -1;
7103     }
7104     table = ind_table(table[idx2]);
7105     if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
7106         printf("*** ERROR: unable to insert opcode "
7107                "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
7108         return -1;
7109     }
7110     return 0;
7111 }

7112 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7113 {
7114     if (insn->opc2 != 0xFF) {
7115         if (insn->opc3 != 0xFF) {
7116             if (insn->opc4 != 0xFF) {
7117                 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7118                                           insn->opc3, insn->opc4,
7119                                           &insn->handler) < 0) {
7120                     return -1;
7121                 }
7122             } else {
7123                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7124                                          insn->opc3, &insn->handler) < 0) {
7125                     return -1;
7126                 }
7127             }
7128         } else {
7129             if (register_ind_insn(ppc_opcodes, insn->opc1,
7130                                   insn->opc2, &insn->handler) < 0) {
7131                 return -1;
7132             }
7133         }
7134     } else {
7135         if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7136             return -1;
7137         }
7138     }
7139 
7140     return 0;
7141 }
7142 
7143 static int test_opcode_table(opc_handler_t **table, int len)
7144 {
7145     int i, count, tmp;
7146 
7147     for (i = 0, count = 0; i < len; i++) {
7148         /* Consistency fixup */
7149         if (table[i] == NULL) {
7150             table[i] = &invalid_handler;
7151         }
7152         if (table[i] != &invalid_handler) {
7153             if (is_indirect_opcode(table[i])) {
7154                 tmp = test_opcode_table(ind_table(table[i]),
7155                     PPC_CPU_INDIRECT_OPCODES_LEN);
7156                 if (tmp == 0) {
7157                     g_free(ind_table(table[i]));
7158                     table[i] = &invalid_handler;
7159                 } else {
7160                     count++;
7161                 }
7162             } else {
7163                 count++;
7164             }
7165         }
7166     }
7167 
7168     return count;
7169 }
7170 
7171 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7172 {
7173     if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7174         printf("*** WARNING: no opcode defined!\n");
7175     }
7176 }
7177 
7178 /*****************************************************************************/
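/*
 * Build the per-CPU dispatch table from the static opcodes[] array above.
 * An entry is registered only when its type/type2 bits intersect the CPU
 * class's insns_flags/insns_flags2, so a single table serves every
 * supported CPU model; fix_opcode_tables() then prunes any sub-table that
 * ended up empty.
 */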
7179 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7180 {
7181     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7182     opcode_t *opc;
7183 
7184     fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7185     for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7186         if (((opc->handler.type & pcc->insns_flags) != 0) ||
7187             ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7188             if (register_insn(cpu->opcodes, opc) < 0) {
7189                 error_setg(errp, "ERROR initializing PowerPC instruction "
7190                            "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7191                            opc->opc3);
7192                 return;
7193             }
7194         }
7195     }
7196     fix_opcode_tables(cpu->opcodes);
7197     fflush(stdout);
7198     fflush(stderr);
7199 }
7200 
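/*
 * Inverse of create_ppc_opcodes(): walk the up to three levels of
 * indirection and g_free() each allocated sub-table after stripping the
 * PPC_INDIRECT tag from the stored pointer.
 */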
7201 void destroy_ppc_opcodes(PowerPCCPU *cpu)
7202 {
7203     opc_handler_t **table, **table_2;
7204     int i, j, k;
7205 
7206     for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
7207         if (cpu->opcodes[i] == &invalid_handler) {
7208             continue;
7209         }
7210         if (is_indirect_opcode(cpu->opcodes[i])) {
7211             table = ind_table(cpu->opcodes[i]);
7212             for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
7213                 if (table[j] == &invalid_handler) {
7214                     continue;
7215                 }
7216                 if (is_indirect_opcode(table[j])) {
7217                     table_2 = ind_table(table[j]);
7218                     for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
7219                         if (table_2[k] != &invalid_handler &&
7220                             is_indirect_opcode(table_2[k])) {
7221                             g_free((opc_handler_t *)((uintptr_t)table_2[k] &
7222                                                      ~PPC_INDIRECT));
7223                         }
7224                     }
7225                     g_free((opc_handler_t *)((uintptr_t)table[j] &
7226                                              ~PPC_INDIRECT));
7227                 }
7228             }
7229             g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
7230                 ~PPC_INDIRECT));
7231         }
7232     }
7233 }
7234 
7235 int ppc_fixup_cpu(PowerPCCPU *cpu)
7236 {
7237     CPUPPCState *env = &cpu->env;
7238 
7239     /*
7240      * TCG doesn't (yet) emulate some groups of instructions that are
7241      * implemented on some otherwise supported CPUs (e.g. VSX and
7242      * decimal floating point instructions on POWER7).  We remove
7243      * unsupported instruction groups from the cpu state's instruction
7244      * masks and hope the guest can cope.  For at least the pseries
7245      * machine, the unavailability of these instructions can be
7246      * advertised to the guest via the device tree.
7247      */
7248     if ((env->insns_flags & ~PPC_TCG_INSNS)
7249         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7250         warn_report("Disabling some instructions which are not "
7251                     "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7252                     env->insns_flags & ~PPC_TCG_INSNS,
7253                     env->insns_flags2 & ~PPC_TCG_INSNS2);
7254     }
7255     env->insns_flags &= PPC_TCG_INSNS;
7256     env->insns_flags2 &= PPC_TCG_INSNS2;
7257     return 0;
7258 }
7259 
7260 static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
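/*
 * Fallback decoder for instructions not (yet) converted to decodetree:
 * index the opcode tables with opc1, then with opc2/opc3/opc4 for every
 * level that turns out to be an indirect sub-table.  The instruction is
 * rejected if it lands on gen_invalid or if any bit covered by the
 * handler's inval mask is set (SPE handlers use inval2 instead of inval1
 * when Rc is set); otherwise the handler emits the TCG code.
 */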
7261 {
7262     opc_handler_t **table, *handler;
7263     uint32_t inval;
7264 
7265     ctx->opcode = insn;
7266 
7267     LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
7268               insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7269               ctx->le_mode ? "little" : "big");
7270 
7271     table = cpu->opcodes;
7272     handler = table[opc1(insn)];
7273     if (is_indirect_opcode(handler)) {
7274         table = ind_table(handler);
7275         handler = table[opc2(insn)];
7276         if (is_indirect_opcode(handler)) {
7277             table = ind_table(handler);
7278             handler = table[opc3(insn)];
7279             if (is_indirect_opcode(handler)) {
7280                 table = ind_table(handler);
7281                 handler = table[opc4(insn)];
7282             }
7283         }
7284     }
7285 
7286     /* Is opcode *REALLY* valid? */
7287     if (unlikely(handler->handler == &gen_invalid)) {
7288         qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
7289                       "%02x - %02x - %02x - %02x (%08x) "
7290                       TARGET_FMT_lx "\n",
7291                       opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7292                       insn, ctx->cia);
7293         return false;
7294     }
7295 
7296     if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
7297                  && Rc(insn))) {
7298         inval = handler->inval2;
7299     } else {
7300         inval = handler->inval1;
7301     }
7302 
7303     if (unlikely((insn & inval) != 0)) {
7304         qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
7305                       "%02x - %02x - %02x - %02x (%08x) "
7306                       TARGET_FMT_lx "\n", insn & inval,
7307                       opc1(insn), opc2(insn), opc3(insn), opc4(insn),
7308                       insn, ctx->cia);
7309         return false;
7310     }
7311 
7312     handler->handler(ctx);
7313     return true;
7314 }
7315 
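/*
 * Translation state is seeded from tb->flags (the packed hflags) plus a
 * few immutable env fields rather than from live CPU registers: anything
 * that influences code generation has to be part of the TB lookup key, so
 * each ctx field below is a bit or bit-field unpacked from that word.
 */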
7316 static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
7317 {
7318     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7319     CPUPPCState *env = cs->env_ptr;
7320     uint32_t hflags = ctx->base.tb->flags;
7321 
7322     ctx->spr_cb = env->spr_cb;
7323     ctx->pr = (hflags >> HFLAGS_PR) & 1;
7324     ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
7325     ctx->dr = (hflags >> HFLAGS_DR) & 1;
7326     ctx->hv = (hflags >> HFLAGS_HV) & 1;
7327     ctx->insns_flags = env->insns_flags;
7328     ctx->insns_flags2 = env->insns_flags2;
7329     ctx->access_type = -1;
7330     ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
7331     ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
7332     ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
7333     ctx->flags = env->flags;
7334 #if defined(TARGET_PPC64)
7335     ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
7336     ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
7337 #endif
7338     ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
7339         || env->mmu_model & POWERPC_MMU_64;
7340 
7341     ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
7342     ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
7343     ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
7344     ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
7345     ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
7346     ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
7347     ctx->hr = (hflags >> HFLAGS_HR) & 1;
7348     ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
7349     ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
7350     ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
7351     ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
7352     ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;
7353 
7354     ctx->singlestep_enabled = 0;
7355     if ((hflags >> HFLAGS_SE) & 1) {
7356         ctx->singlestep_enabled |= CPU_SINGLE_STEP;
7357         ctx->base.max_insns = 1;
7358     }
7359     if ((hflags >> HFLAGS_BE) & 1) {
7360         ctx->singlestep_enabled |= CPU_BRANCH_STEP;
7361     }
7362 }
7363 
7364 static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
7365 {
7366 }
7367 
7368 static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
7369 {
7370     tcg_gen_insn_start(dcbase->pc_next);
7371 }
7372 
7373 static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
7374 {
7375     REQUIRE_INSNS_FLAGS2(ctx, ISA310);
7376     return opc1(insn) == 1;
7377 }
7378 
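/*
 * Fetch and translate one instruction.  Word instructions are offered to
 * the decodetree decoder (decode_insn32) first and fall back to the
 * legacy opcode tables.  ISA v3.1 prefixed instructions (primary opcode 1)
 * are eight bytes long: the suffix word is fetched separately and the pair
 * is passed to decode_insn64() as one 64-bit value with the prefix in the
 * upper half, i.e. deposit64(insn2, 32, 32, insn) is
 * ((uint64_t)insn << 32) | insn2.
 */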
7379 static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
7380 {
7381     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7382     PowerPCCPU *cpu = POWERPC_CPU(cs);
7383     CPUPPCState *env = cs->env_ptr;
7384     target_ulong pc;
7385     uint32_t insn;
7386     bool ok;
7387 
7388     LOG_DISAS("----------------\n");
7389     LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
7390               ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);
7391 
7392     ctx->cia = pc = ctx->base.pc_next;
7393     insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
7394     ctx->base.pc_next = pc += 4;
7395 
7396     if (!is_prefix_insn(ctx, insn)) {
7397         ok = (decode_insn32(ctx, insn) ||
7398               decode_legacy(cpu, ctx, insn));
7399     } else if ((pc & 63) == 0) {
7400         /*
7401          * Power v3.1, section 1.9 Exceptions:
7402          * attempt to execute a prefixed instruction that crosses a
7403          * 64-byte address boundary (system alignment error).
7404          */
7405         gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
7406         ok = true;
7407     } else {
7408         uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
7409                                              need_byteswap(ctx));
7410         ctx->base.pc_next = pc += 4;
7411         ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
7412     }
7413     if (!ok) {
7414         gen_invalid(ctx);
7415     }
7416 
7417     /* End the TB when crossing a page boundary. */
7418     if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
7419         ctx->base.is_jmp = DISAS_TOO_MANY;
7420     }
7421 }
7422 
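/*
 * Finish the TB.  As handled below, the *_UPDATE variants mean NIP has
 * not been written back yet, so gen_update_nip() must run first;
 * DISAS_CHAIN[_UPDATE] tries to continue into the next TB via
 * tcg_gen_lookup_and_goto_ptr(), while DISAS_EXIT[_UPDATE] returns to the
 * main loop.  (The constants themselves are defined earlier in this file.)
 */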
7423 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7424 {
7425     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7426     DisasJumpType is_jmp = ctx->base.is_jmp;
7427     target_ulong nip = ctx->base.pc_next;
7428 
7429     if (is_jmp == DISAS_NORETURN) {
7430         /* We have already exited the TB. */
7431         return;
7432     }
7433 
7434     /* Honor single stepping. */
7435     if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
7436         bool rfi_type = false;
7437 
7438         switch (is_jmp) {
7439         case DISAS_TOO_MANY:
7440         case DISAS_EXIT_UPDATE:
7441         case DISAS_CHAIN_UPDATE:
7442             gen_update_nip(ctx, nip);
7443             break;
7444         case DISAS_EXIT:
7445         case DISAS_CHAIN:
7446             /*
7447              * This is a heuristic, to put it kindly. The rfi class of
7448              * instructions are among the few outside branches that change
7449              * NIP without taking an interrupt. Single step trace interrupts
7450              * do not fire on completion of these instructions.
7451              */
7452             rfi_type = true;
7453             break;
7454         default:
7455             g_assert_not_reached();
7456         }
7457 
7458         gen_debug_exception(ctx, rfi_type);
7459         return;
7460     }
7461 
7462     switch (is_jmp) {
7463     case DISAS_TOO_MANY:
7464         if (use_goto_tb(ctx, nip)) {
7465             pmu_count_insns(ctx);
7466             tcg_gen_goto_tb(0);
7467             gen_update_nip(ctx, nip);
7468             tcg_gen_exit_tb(ctx->base.tb, 0);
7469             break;
7470         }
7471         /* fall through */
7472     case DISAS_CHAIN_UPDATE:
7473         gen_update_nip(ctx, nip);
7474         /* fall through */
7475     case DISAS_CHAIN:
7476         /*
7477          * tcg_gen_lookup_and_goto_ptr will exit the TB if
7478          * CF_NO_GOTO_PTR is set. Count insns now.
7479          */
7480         if (tb_cflags(ctx->base.tb) & CF_NO_GOTO_PTR) {
7481             pmu_count_insns(ctx);
7482         }
7483 
7484         tcg_gen_lookup_and_goto_ptr();
7485         break;
7486 
7487     case DISAS_EXIT_UPDATE:
7488         gen_update_nip(ctx, nip);
7489         /* fall through */
7490     case DISAS_EXIT:
7491         pmu_count_insns(ctx);
7492         tcg_gen_exit_tb(NULL, 0);
7493         break;
7494 
7495     default:
7496         g_assert_not_reached();
7497     }
7498 }
7499 
7500 static void ppc_tr_disas_log(const DisasContextBase *dcbase,
7501                              CPUState *cs, FILE *logfile)
7502 {
7503     fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
7504     target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
7505 }
7506 
7507 static const TranslatorOps ppc_tr_ops = {
7508     .init_disas_context = ppc_tr_init_disas_context,
7509     .tb_start           = ppc_tr_tb_start,
7510     .insn_start         = ppc_tr_insn_start,
7511     .translate_insn     = ppc_tr_translate_insn,
7512     .tb_stop            = ppc_tr_tb_stop,
7513     .disas_log          = ppc_tr_disas_log,
7514 };
7515 
7516 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
7517                            target_ulong pc, void *host_pc)
7518 {
7519     DisasContext ctx;
7520 
7521     translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
7522 }
7523