xref: /openbmc/qemu/target/ppc/translate.c (revision e9206163)
1 /*
2  *  PowerPC emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (C) 2011 Freescale Semiconductor, Inc.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 
33 #include "exec/translator.h"
34 #include "exec/log.h"
35 #include "qemu/atomic128.h"
36 #include "spr_common.h"
37 #include "power8-pmu.h"
38 
39 #include "qemu/qemu-print.h"
40 #include "qapi/error.h"
41 
42 #define HELPER_H "helper.h"
43 #include "exec/helper-info.c.inc"
44 #undef  HELPER_H
45 
46 #define CPU_SINGLE_STEP 0x1
47 #define CPU_BRANCH_STEP 0x2
48 
49 /* Include definitions for instructions classes and implementations flags */
50 /* #define PPC_DEBUG_DISAS */
51 
52 #ifdef PPC_DEBUG_DISAS
53 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
54 #else
55 #  define LOG_DISAS(...) do { } while (0)
56 #endif
57 /*****************************************************************************/
58 /* Code translation helpers                                                  */
59 
/* global register indexes */
/*
 * Backing storage for the register names passed to tcg_global_mem_new*()
 * below; the sizing accounts for each name's NUL terminator:
 * "r0".."r9" (3 bytes) / "r10".."r31" (4), the SPE high halves
 * "r0H".."r31H" (4/5), and "crf0".."crf7" (5).
 */
static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
                          + 10 * 4 + 22 * 5 /* SPE GPRh */
                          + 8 * 5           /* CRF */];
static TCGv cpu_gpr[32];
static TCGv cpu_gprh[32];   /* upper 32 bits of the GPRs (SPE) */
static TCGv_i32 cpu_crf[8]; /* condition register fields */
static TCGv cpu_nip;        /* next instruction pointer */
static TCGv cpu_msr;
static TCGv cpu_ctr;
static TCGv cpu_lr;
#if defined(TARGET_PPC64)
static TCGv cpu_cfar;
#endif
/* XER is kept split: SO/OV/CA (and ISA v3.00 OV32/CA32) have own globals */
static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
static TCGv cpu_reserve;        /* load-reserve/store-conditional state */
static TCGv cpu_reserve_length;
static TCGv cpu_reserve_val;
#if defined(TARGET_PPC64)
static TCGv cpu_reserve_val2;
#endif
static TCGv cpu_fpscr;
static TCGv_i32 cpu_access_type;
83 
/*
 * Create the TCG globals that mirror CPUPPCState fields.  Called once
 * at translator initialization; the generated names live in
 * cpu_reg_names, which is consumed in-place via p/cpu_reg_names_size.
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* "crf0".."crf7": each name occupies exactly 5 bytes incl. NUL */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(tcg_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs and SPE high halves; name length differs for r0-r9 vs r10-r31 */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(tcg_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER flag bits are tracked separately from the xer register proper */
    cpu_xer = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(tcg_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(tcg_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(tcg_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_length = tcg_global_mem_new(tcg_env,
                                            offsetof(CPUPPCState,
                                                     reserve_length),
                                            "reserve_length");
    cpu_reserve_val = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUPPCState, reserve_val),
                                         "reserve_val");
#if defined(TARGET_PPC64)
    cpu_reserve_val2 = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUPPCState, reserve_val2),
                                          "reserve_val2");
#endif

    cpu_fpscr = tcg_global_mem_new(tcg_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(tcg_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
167 
/* internal defines */
/* Per-translation-block state of the PowerPC instruction decoder. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;
    /* Routine used to access memory */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;
    int mem_idx;
    int access_type;   /* last value written to cpu_access_type, or -1 */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;      /* 64-bit mode active (see NARROW_MODE) */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool mmcr0_pmcjce;
    bool pmc_other;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};

/* Translator exit conditions beyond the generic DisasJumpType values. */
#define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
#define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
#define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
#define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */
208 
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
    /* Swap whenever guest endianness differs from the target's default. */
#if TARGET_BIG_ENDIAN
     return ctx->le_mode;
#else
     return !ctx->le_mode;
#endif
}

/* True when active word size < size of target_long.  */
#ifdef TARGET_PPC64
# define NARROW_MODE(C)  (!(C)->sf_mode)
#else
# define NARROW_MODE(C)  0  /* 32-bit target: never narrower than target_long */
#endif
225 
/* Decode-table entry describing one opcode's constraints and emitter. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler: emits TCG code for one instruction */
    void (*handler)(DisasContext *ctx);
};
238 
239 static inline bool gen_serialize(DisasContext *ctx)
240 {
241     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
242         /* Restart with exclusive lock.  */
243         gen_helper_exit_atomic(tcg_env);
244         ctx->base.is_jmp = DISAS_NORETURN;
245         return false;
246     }
247     return true;
248 }
249 
#if !defined(CONFIG_USER_ONLY)
#if defined(TARGET_PPC64)
/* Serialize only when multiple SMT threads share the core state. */
static inline bool gen_serialize_core(DisasContext *ctx)
{
    if (ctx->flags & POWERPC_FLAG_SMT) {
        return gen_serialize(ctx);
    }
    return true;
}
#endif

/* Serialize only when multiple SMT threads share one LPAR. */
static inline bool gen_serialize_core_lpar(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) {
        return gen_serialize(ctx);
    }
#endif
    return true;
}
#endif
271 
/* SPR load/store helpers */
/* Emit a load of env->spr[reg] into t. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}

/* Emit a store of t into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUPPCState, spr[reg]));
}
282 
/*
 * Record the memory access type in env->access_type, skipping the store
 * when the CPU doesn't need it or it already holds the wanted value
 * (tracked per-TB in ctx->access_type).
 */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}
290 
291 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
292 {
293     if (NARROW_MODE(ctx)) {
294         nip = (uint32_t)nip;
295     }
296     tcg_gen_movi_tl(cpu_nip, nip);
297 }
298 
299 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
300 {
301     TCGv_i32 t0, t1;
302 
303     /*
304      * These are all synchronous exceptions, we set the PC back to the
305      * faulting instruction
306      */
307     gen_update_nip(ctx, ctx->cia);
308     t0 = tcg_constant_i32(excp);
309     t1 = tcg_constant_i32(error);
310     gen_helper_raise_exception_err(tcg_env, t0, t1);
311     ctx->base.is_jmp = DISAS_NORETURN;
312 }
313 
314 static void gen_exception(DisasContext *ctx, uint32_t excp)
315 {
316     TCGv_i32 t0;
317 
318     /*
319      * These are all synchronous exceptions, we set the PC back to the
320      * faulting instruction
321      */
322     gen_update_nip(ctx, ctx->cia);
323     t0 = tcg_constant_i32(excp);
324     gen_helper_raise_exception(tcg_env, t0);
325     ctx->base.is_jmp = DISAS_NORETURN;
326 }
327 
328 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
329                               target_ulong nip)
330 {
331     TCGv_i32 t0;
332 
333     gen_update_nip(ctx, nip);
334     t0 = tcg_constant_i32(excp);
335     gen_helper_raise_exception(tcg_env, t0);
336     ctx->base.is_jmp = DISAS_NORETURN;
337 }
338 
#if !defined(CONFIG_USER_ONLY)
/* Emit a call to re-evaluate pending interrupts (system emulation only). */
static void gen_ppc_maybe_interrupt(DisasContext *ctx)
{
    translator_io_start(&ctx->base);
    gen_helper_ppc_maybe_interrupt(tcg_env);
}
#endif
346 
/*
 * Tells the caller what is the appropriate exception to generate and prepares
 * SPR registers for this exception.
 *
 * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
 * POWERPC_EXCP_DEBUG (on BookE).
 */
static void gen_debug_exception(DisasContext *ctx, bool rfi_type)
{
#if !defined(CONFIG_USER_ONLY)
    if (ctx->flags & POWERPC_FLAG_DE) {
        /* BookE path: record the debug event cause in DBSR first */
        target_ulong dbsr = 0;
        if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
            dbsr = DBCR0_ICMP;
        } else {
            /* Must have been branch */
            dbsr = DBCR0_BRT;
        }
        TCGv t0 = tcg_temp_new();
        gen_load_spr(t0, SPR_BOOKE_DBSR);
        tcg_gen_ori_tl(t0, t0, dbsr);
        gen_store_spr(SPR_BOOKE_DBSR, t0);
        gen_helper_raise_exception(tcg_env,
                                   tcg_constant_i32(POWERPC_EXCP_DEBUG));
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!rfi_type) { /* BookS does not single step rfi type instructions */
            TCGv t0 = tcg_temp_new();
            tcg_gen_movi_tl(t0, ctx->cia);
            gen_helper_book3s_trace(tcg_env, t0);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
    }
#endif
}
382 
/* Raise an invalid-instruction program interrupt. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privileged-instruction program interrupt. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Raise a privileged-instruction fault via the HV emulation path. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
399 
/*****************************************************************************/
/* SPR READ/WRITE CALLBACKS */

/* Callback for SPRs with no access: deliberately does nothing. */
void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
{
#if 0
    sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
    printf("ERROR: try to access SPR %d !\n", sprn);
#endif
}

/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */
/* Optional debug hook: trace SPR reads when PPC_DUMP_SPR_ACCESSES is set. */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_load_dump_spr(tcg_env, t0);
#endif
}
424 
/* Generic SPR read: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

/* Optional debug hook: trace SPR writes when PPC_DUMP_SPR_ACCESSES is set. */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_store_dump_spr(tcg_env, t0);
#endif
}

/* Generic SPR write: copy the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
444 
/* SPR write that keeps only the low 32 bits of the source GPR. */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    spr_store_dump_spr(sprn);
#else
    /* target_long is already 32 bits; plain write suffices */
    spr_write_generic(ctx, sprn, gprn);
#endif
}
456 
/*
 * Write a core-shared SPR.  Without SMT this is a plain per-thread
 * write; with SMT the write must be serialized so the helper can update
 * the SPR of every sibling thread coherently.
 */
void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT)) {
        spr_write_generic(ctx, sprn, gprn);
        return;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn),
                                      cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
472 
/* Single-thread CTRL write: mirror the RUN bit (bit 0) into TS (bit 8). */
static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn)
{
    /* This does not implement >1 thread */
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[gprn], 0, 1); /* Extract RUN field */
    tcg_gen_shli_tl(t1, t0, 8); /* Duplicate the bit in TS */
    tcg_gen_or_tl(t1, t1, t0);
    gen_store_spr(sprn, t1);
}

/*
 * CTRL write.  In 1-LPAR SMT mode the helper must see all sibling
 * threads, so execution is serialized; otherwise the single-thread
 * fast path above is used.
 */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    if (!(ctx->flags & POWERPC_FLAG_SMT_1LPAR)) {
        /* CTRL behaves as 1-thread in LPAR-per-thread mode */
        spr_write_CTRL_ST(ctx, sprn, gprn);
        goto out;
    }

    if (!gen_serialize(ctx)) {
        return;
    }

    gen_helper_spr_write_CTRL(tcg_env, tcg_constant_i32(sprn),
                              cpu_gpr[gprn]);
out:
    spr_store_dump_spr(sprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
508 
#if !defined(CONFIG_USER_ONLY)
/* Write-one-to-clear SPR semantics: spr &= ~gpr (via spr & -gpr... note:
 * neg+and implements spr & ~(gpr - 1)?  No — tcg_gen_neg_tl(t1, x) is
 * two's-complement negation; here it computes spr & -gpr, matching the
 * historical behavior of this callback.  TODO(review): confirm against
 * the W1C semantics intended for the SPRs registered with this hook. */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
}

/* Intentional no-op access callback. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}

#endif
525 
526 /* SPR common to all PowerPC */
527 /* XER */
528 void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
529 {
530     TCGv dst = cpu_gpr[gprn];
531     TCGv t0 = tcg_temp_new();
532     TCGv t1 = tcg_temp_new();
533     TCGv t2 = tcg_temp_new();
534     tcg_gen_mov_tl(dst, cpu_xer);
535     tcg_gen_shli_tl(t0, cpu_so, XER_SO);
536     tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
537     tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
538     tcg_gen_or_tl(t0, t0, t1);
539     tcg_gen_or_tl(dst, dst, t2);
540     tcg_gen_or_tl(dst, dst, t0);
541     if (is_isa300(ctx)) {
542         tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
543         tcg_gen_or_tl(dst, dst, t0);
544         tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
545         tcg_gen_or_tl(dst, dst, t0);
546     }
547 }
548 
549 void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
550 {
551     TCGv src = cpu_gpr[gprn];
552     /* Write all flags, while reading back check for isa300 */
553     tcg_gen_andi_tl(cpu_xer, src,
554                     ~((1u << XER_SO) |
555                       (1u << XER_OV) | (1u << XER_OV32) |
556                       (1u << XER_CA) | (1u << XER_CA32)));
557     tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
558     tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
559     tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
560     tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
561     tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
562 }
563 
/* LR */
/* Read the link register into a GPR. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

/* Write a GPR into the link register. */
void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}
574 
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Debug facilities */
/* CFAR */
/* Read the come-from address register into a GPR. */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

/* Write a GPR into the come-from address register. */
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}

/* Breakpoint */
/* CIABR writes go through a helper (may rearm the instruction breakpoint). */
void spr_write_ciabr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_ciabr(tcg_env, cpu_gpr[gprn]);
}

/* Watchpoint */
/* DAWR0 writes go through a helper (may rearm the data watchpoint). */
void spr_write_dawr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawr0(tcg_env, cpu_gpr[gprn]);
}

/* DAWRX0 writes go through a helper (watchpoint control bits). */
void spr_write_dawrx0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_dawrx0(tcg_env, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
608 
/* CTR */
/* Read the count register into a GPR. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

/* Write a GPR into the count register. */
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}

/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* User-mode alias: read the privileged SPR at sprn + 0x10. */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* User-mode alias: write the privileged SPR at sprn + 0x10. */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
637 
/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
/* Decrementer read: timer-backed, so I/O must be flagged first. */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_decr(cpu_gpr[gprn], tcg_env);
}

/* Decrementer write: reprograms the timer via helper. */
void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_decr(tcg_env, cpu_gpr[gprn]);
}
#endif

/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
/* Read the low half of the time base. */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbl(cpu_gpr[gprn], tcg_env);
}

/* Read the high half of the time base. */
void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_tbu(cpu_gpr[gprn], tcg_env);
}

/* Read the low half of the alternate time base. */
void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], tcg_env);
}

/* Read the high half of the alternate time base. */
void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], tcg_env);
}
677 
#if !defined(CONFIG_USER_ONLY)
/* Time-base writes are LPAR-wide on SMT systems: serialize first. */
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbl(tcg_env, cpu_gpr[gprn]);
}

/* Write the high half of the time base (LPAR-wide). */
void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    translator_io_start(&ctx->base);
    gen_helper_store_tbu(tcg_env, cpu_gpr[gprn]);
}

/* Write the low half of the alternate time base. */
void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(tcg_env, cpu_gpr[gprn]);
}

/* Write the high half of the alternate time base. */
void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(tcg_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* Processor utilization resource register: timer-backed read. */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_purr(cpu_gpr[gprn], tcg_env);
}

/* PURR write: LPAR-wide, hence serialized. */
void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_purr(tcg_env, cpu_gpr[gprn]);
}

/* HDECR */
/* Hypervisor decrementer read. */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_hdecr(cpu_gpr[gprn], tcg_env);
}

/* Hypervisor decrementer write: LPAR-wide, hence serialized. */
void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_hdecr(tcg_env, cpu_gpr[gprn]);
}

/* Virtual time base read. */
void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_vtb(cpu_gpr[gprn], tcg_env);
}

/* Virtual time base write: LPAR-wide, hence serialized. */
void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_vtb(tcg_env, cpu_gpr[gprn]);
}

/* TBU40 write (upper 40 bits of TB): LPAR-wide, hence serialized. */
void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }
    translator_io_start(&ctx->base);
    gen_helper_store_tbu40(tcg_env, cpu_gpr[gprn]);
}

#endif
#endif
767 
#if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT0U */
/* IBAT0L...IBAT7L */
/*
 * BAT SPR numbers interleave upper/lower halves, so (sprn & 1) selects
 * the U/L array and the division by 2 recovers the BAT index.
 */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

/* Read IBAT4..7 (the "high" group, offset by 4 in the array). */
void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

/* IBAT upper writes go through a helper (BAT setup side effects). */
void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* IBAT4..7 upper writes. */
void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* IBAT lower writes. */
void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}

/* IBAT4..7 lower writes. */
void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(tcg_env, t0, cpu_gpr[gprn]);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
/* Same layout trick as the IBATs, for the data BAT array. */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

/* Read DBAT4..7. */
void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

/* DBAT upper writes. */
void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* DBAT4..7 upper writes. */
void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(tcg_env, t0, cpu_gpr[gprn]);
}

/* DBAT lower writes. */
void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}

/* DBAT4..7 lower writes. */
void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(tcg_env, t0, cpu_gpr[gprn]);
}
848 
/* SDR1 */
/* SDR1 (hash page table base) writes go through a helper. */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(tcg_env, cpu_gpr[gprn]);
}

#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
/* PIDR writes go through a helper. */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(tcg_env, cpu_gpr[gprn]);
}

/* LPIDR writes go through a helper. */
void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(tcg_env, cpu_gpr[gprn]);
}

/* HIOR reads return the current exception prefix. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], tcg_env, offsetof(CPUPPCState, excp_prefix));
}

/* HIOR writes update the exception prefix, masked to valid bits. */
void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
}
/* PTCR (partition table control) writes go through a helper. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]);
}

/* PCR (processor compatibility) writes go through a helper. */
void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(tcg_env, cpu_gpr[gprn]);
}

/* DPDES */
/* DPDES is shared across the LPAR: serialize before reading. */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_load_dpdes(cpu_gpr[gprn], tcg_env);
}

/* DPDES write: LPAR-wide, hence serialized. */
void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core_lpar(ctx)) {
        return;
    }

    gen_helper_store_dpdes(tcg_env, cpu_gpr[gprn]);
}
#endif
#endif
909 
/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
/* 40x programmable interval timer read. */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    translator_io_start(&ctx->base);
    gen_helper_load_40x_pit(cpu_gpr[gprn], tcg_env);
}

/* 40x programmable interval timer write. */
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_pit(tcg_env, cpu_gpr[gprn]);
}

/* 40x DBCR0 write: may reset the machine, so the TB must end here. */
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(tcg_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

/* 40x SLER (storage little-endian) write. */
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_sler(tcg_env, cpu_gpr[gprn]);
}

/* 40x timer control register write. */
void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tcr(tcg_env, cpu_gpr[gprn]);
}

/* 40x timer status register write. */
void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_40x_tsr(tcg_env, cpu_gpr[gprn]);
}

/* 40x PID write: only the low 8 bits are architected. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(tcg_env, t0);
}

/* BookE timer control register write. */
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tcr(tcg_env, cpu_gpr[gprn]);
}

/* BookE timer status register write. */
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_booke_tsr(tcg_env, cpu_gpr[gprn]);
}
#endif
970 
/* PIR */
#if !defined(CONFIG_USER_ONLY)
/* PIR write: only the low 4 bits are writable here. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
}
#endif

/* SPE specific registers */
/* SPEFSCR is stored as a 32-bit field; zero-extend on read. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
}

/* SPEFSCR write: truncate the GPR to 32 bits. */
void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, tcg_env, offsetof(CPUPPCState, spe_fscr));
}
995 
#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
/* IVPR write: masked by env->ivpr_mask, mirrored into excp_prefix. */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
}

/*
 * IVORn write: map the (non-contiguous) BookE IVOR SPR numbers onto the
 * excp_vectors[] index, masking the value with env->ivor_mask.  Unknown
 * SPR numbers raise an invalid-instruction exception.
 */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, tcg_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, tcg_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
}
#endif
1031 
1032 #ifdef TARGET_PPC64
1033 #ifndef CONFIG_USER_ONLY
/*
 * AMR write: only the bits permitted by the insertion mask (UAMOR in
 * problem state, AMOR otherwise) may be modified; all other AMR bits
 * keep their current value.
 */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        /* Problem state: writes are limited by UAMOR. */
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        /* Privileged non-HV state: writes are limited by AMOR. */
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* OR in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);
}
1064 
/* UAMOR write: insertion is limited by AMOR (same pattern as AMR above). */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* OR in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);
}
1091 
/* IAMR write: insertion is limited by AMOR (same pattern as AMR above). */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* OR in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);
}
1118 #endif
1119 #endif
1120 
1121 #ifndef CONFIG_USER_ONLY
/* THRM registers: let the helper refresh their state before reading. */
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_fixup_thrm(tcg_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1128 #endif /* !CONFIG_USER_ONLY */
1129 
1130 #if !defined(CONFIG_USER_ONLY)
1131 void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
1132 {
1133     TCGv t0 = tcg_temp_new();
1134 
1135     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
1136     gen_store_spr(sprn, t0);
1137 }
1138 
1139 void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
1140 {
1141     TCGv t0 = tcg_temp_new();
1142 
1143     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
1144     gen_store_spr(sprn, t0);
1145 }
1146 
1147 void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
1148 {
1149     TCGv t0 = tcg_temp_new();
1150 
1151     tcg_gen_andi_tl(t0, cpu_gpr[gprn],
1152                     ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
1153     gen_store_spr(sprn, t0);
1154 }
1155 
/* MMUCSR0: the helper performs the requested TLB flush operation. */
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke206_tlbflush(tcg_env, cpu_gpr[gprn]);
}

/* BookE PID registers: pass the SPR number so the helper knows which
 * of the PID registers is being written. */
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_constant_i32(sprn);
    gen_helper_booke_setpid(tcg_env, t0, cpu_gpr[gprn]);
}

/* EPLC: update handled entirely by the helper. */
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(tcg_env, cpu_gpr[gprn]);
}

/* EPSC: update handled entirely by the helper. */
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(tcg_env, cpu_gpr[gprn]);
}
1176 
1177 #endif
1178 
1179 #if !defined(CONFIG_USER_ONLY)
1180 void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
1181 {
1182     TCGv val = tcg_temp_new();
1183     tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
1184     gen_store_spr(SPR_BOOKE_MAS3, val);
1185     tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
1186     gen_store_spr(SPR_BOOKE_MAS7, val);
1187 }
1188 
1189 void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
1190 {
1191     TCGv mas7 = tcg_temp_new();
1192     TCGv mas3 = tcg_temp_new();
1193     gen_load_spr(mas7, SPR_BOOKE_MAS7);
1194     tcg_gen_shli_tl(mas7, mas7, 32);
1195     gen_load_spr(mas3, SPR_BOOKE_MAS3);
1196     tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
1197 }
1198 
1199 #endif
1200 
1201 #ifdef TARGET_PPC64
1202 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1203                                     int bit, int sprn, int cause)
1204 {
1205     TCGv_i32 t1 = tcg_constant_i32(bit);
1206     TCGv_i32 t2 = tcg_constant_i32(sprn);
1207     TCGv_i32 t3 = tcg_constant_i32(cause);
1208 
1209     gen_helper_fscr_facility_check(tcg_env, t1, t2, t3);
1210 }
1211 
1212 static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
1213                                    int bit, int sprn, int cause)
1214 {
1215     TCGv_i32 t1 = tcg_constant_i32(bit);
1216     TCGv_i32 t2 = tcg_constant_i32(sprn);
1217     TCGv_i32 t3 = tcg_constant_i32(cause);
1218 
1219     gen_helper_msr_facility_check(tcg_env, t1, t2, t3);
1220 }
1221 
1222 void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
1223 {
1224     TCGv spr_up = tcg_temp_new();
1225     TCGv spr = tcg_temp_new();
1226 
1227     gen_load_spr(spr, sprn - 1);
1228     tcg_gen_shri_tl(spr_up, spr, 32);
1229     tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
1230 }
1231 
1232 void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
1233 {
1234     TCGv spr = tcg_temp_new();
1235 
1236     gen_load_spr(spr, sprn - 1);
1237     tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
1238     gen_store_spr(sprn - 1, spr);
1239 }
1240 
1241 #if !defined(CONFIG_USER_ONLY)
1242 void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
1243 {
1244     TCGv hmer = tcg_temp_new();
1245 
1246     gen_load_spr(hmer, sprn);
1247     tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
1248     gen_store_spr(sprn, hmer);
1249     spr_store_dump_spr(sprn);
1250 }
1251 
void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn)
{
    /* Reading TFMR can cause it to be updated, so serialize threads here too */
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_load_tfmr(cpu_gpr[gprn], tcg_env);
}

/* TFMR is core-shared state: serialize sibling threads before writing. */
void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn)
{
    if (!gen_serialize_core(ctx)) {
        return;
    }
    gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]);
}

/* LPCR: side effects are handled by the helper; end the I/O-free section. */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_lpcr(tcg_env, cpu_gpr[gprn]);
}
1274 #endif /* !defined(CONFIG_USER_ONLY) */
1275 
/* TAR is gated by FSCR[TAR]; fault with cause FSCR_IC_TAR if disabled. */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}

/* TM SPRs are gated by MSR[TM]; fault with cause FSCR_IC_TM if disabled. */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}

/* "upper32" variants access bits 0:31 of the 64-bit SPR at sprn - 1. */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}

/* EBB SPRs are gated by FSCR[EBB]; fault with cause FSCR_IC_EBB if disabled. */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}

void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}

void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1335 
/* Problem-state (user) read of the (H)DEXCR. */
void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0 = tcg_temp_new();

    /*
     * Access to the (H)DEXCR in problem state is done using separated
     * SPR indexes which are 16 below the SPR indexes which have full
     * access to the (H)DEXCR in privileged state. Problem state can
     * only read bits 32:63, bits 0:31 return 0.
     *
     * See section 9.3.1-9.3.2 of PowerISA v3.1B
     */

    gen_load_spr(t0, sprn + 16);
    tcg_gen_ext32u_tl(cpu_gpr[gprn], t0);
}
1352 #endif
1353 
/*
 * Wrappers expanding to an opcode_t table entry for gen_<name>:
 *  - the _E variants carry an extra type2 flag word,
 *  - the "2" variants take an explicit opcode-name string,
 *  - the _2 variants add a fourth opcode field (opc4).
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1371 
/* One entry of the opcode dispatch tables. */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4;   /* decoded opcode fields */
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler;                  /* decode masks + generator */
    const char *oname;                      /* opcode name, for diagnostics */
} opcode_t;
1380 
/* Raise a privileged-instruction program interrupt. */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1385 
/* Helpers for priv. check: raise the fault and abort translation of
 * the current instruction. */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* In user-only mode every privileged instruction simply traps. */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Hypervisor state required: not problem state, and HV set. */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Supervisor (or better) required: not problem state. */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Hypervisor real mode required: HV set and data relocation off. */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check at all. */
#define CHK_NONE(CTX)
1418 
1419 /*****************************************************************************/
1420 /* PowerPC instructions table                                                */
1421 
/* Basic entry: three opcode fields, one invalid-bits mask. */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with two invalid-bits masks (dual-form insns). */
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* As GEN_OPCODE, but with an explicit opcode-name string. */
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
/* As GEN_OPCODE, but with a fourth opcode field (opc4). */
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
/* Fourth opcode field plus an explicit opcode-name string. */
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1493 
/* Invalid instruction */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}

/* Catch-all table entry: any opcode bit pattern is treated as invalid. */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1507 
1508 /***                           Integer comparison                          ***/
1509 
1510 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
1511 {
1512     TCGv t0 = tcg_temp_new();
1513     TCGv t1 = tcg_temp_new();
1514     TCGv_i32 t = tcg_temp_new_i32();
1515 
1516     tcg_gen_movi_tl(t0, CRF_EQ);
1517     tcg_gen_movi_tl(t1, CRF_LT);
1518     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
1519                        t0, arg0, arg1, t1, t0);
1520     tcg_gen_movi_tl(t1, CRF_GT);
1521     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
1522                        t0, arg0, arg1, t1, t0);
1523 
1524     tcg_gen_trunc_tl_i32(t, t0);
1525     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
1526     tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
1527 }
1528 
1529 static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
1530 {
1531     TCGv t0 = tcg_constant_tl(arg1);
1532     gen_op_cmp(arg0, t0, s, crf);
1533 }
1534 
1535 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
1536 {
1537     TCGv t0, t1;
1538     t0 = tcg_temp_new();
1539     t1 = tcg_temp_new();
1540     if (s) {
1541         tcg_gen_ext32s_tl(t0, arg0);
1542         tcg_gen_ext32s_tl(t1, arg1);
1543     } else {
1544         tcg_gen_ext32u_tl(t0, arg0);
1545         tcg_gen_ext32u_tl(t1, arg1);
1546     }
1547     gen_op_cmp(t0, t1, s, crf);
1548 }
1549 
1550 static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
1551 {
1552     TCGv t0 = tcg_constant_tl(arg1);
1553     gen_op_cmp32(arg0, t0, s, crf);
1554 }
1555 
1556 static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
1557 {
1558     if (NARROW_MODE(ctx)) {
1559         gen_op_cmpi32(reg, 0, 1, 0);
1560     } else {
1561         gen_op_cmpi(reg, 0, 1, 0);
1562     }
1563 }
1564 
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 = low byte of rA; src2lo/src2hi = bytes 0/1 of rB (1st range). */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* In range iff src2lo <= src1 && src1 <= src2hi. */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    if (ctx->opcode & 0x00200000) {
        /* L=1: also test the second range held in bytes 2/3 of rB. */
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* The in-range result is reported in the GT bit of the CR field. */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
}
1598 
1599 #if defined(TARGET_PPC64)
/* cmpeqb - implemented in a helper; the result goes into CR field crfD. */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
1606 #endif
1607 
1608 /* isel (PowerPC 2.03 specification) */
1609 static void gen_isel(DisasContext *ctx)
1610 {
1611     uint32_t bi = rC(ctx->opcode);
1612     uint32_t mask = 0x08 >> (bi & 0x03);
1613     TCGv t0 = tcg_temp_new();
1614     TCGv zr;
1615 
1616     tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
1617     tcg_gen_andi_tl(t0, t0, mask);
1618 
1619     zr = tcg_constant_tl(0);
1620     tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
1621                        rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
1622                        cpu_gpr[rB(ctx->opcode)]);
1623 }
1624 
/* cmpb: PowerPC 2.05 specification */
static void gen_cmpb(DisasContext *ctx)
{
    /* rA = per-byte comparison mask of rS vs rB, computed in a helper. */
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1631 
1632 /***                           Integer arithmetic                          ***/
1633 
/*
 * Compute XER[OV] (and OV32 on ISA v3.0+) for an add (sub=0) or subtract
 * (sub=1), given the result (arg0) and operands (arg1, arg2).  OV is
 * also accumulated into XER[SO].
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the sign bit of interest is bit 31. */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        /* OV32 uses bit 31, OV the full-width sign bit. */
        if (is_isa300(ctx)) {
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
1659 
/*
 * Compute the ISA v3.0 CA32 flag (carry out of bit 32) from the result
 * 'res' of arg0 op arg1 ('sub' selects subtract semantics).  No-op on
 * pre-v3.0 CPUs.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    /* Bit 32 of (operand-combination ^ result) is the carry into bit 32. */
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
}
1679 
/*
 * Common add function.
 *
 * ret = arg1 + arg2 [+ ca].  Optionally computes carry (ca / ca32),
 * overflow (OV/OV32/SO) and CR0 (compute_rc0).  'ca' is the carry-in
 * (when add_ca) and carry-out (when compute_ca); addex passes cpu_ov
 * here instead of cpu_ca.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Use a scratch result so flag computation still sees the inputs
     * even when ret aliases arg1/arg2. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            TCGv zero = tcg_constant_tl(0);
            if (add_ca) {
                /* Double-width adds: first fold in the carry, then arg2. */
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Add functions with two operands: emit gen_<name> doing rD = rA + rB
 * with the requested carry/overflow options. */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
1758 
/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo.  */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex: like adde, but uses XER[OV] as the carry-in/carry-out. */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
/* addic  addic.*/
/* rD = rA + SIMM, always updating CA; CR0 update chosen by the caller. */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
}

static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

/* addic.: as addic, but also sets CR0. */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1793 
/*
 * Common 32-bit divide: ret = arg1 / arg2 (signed when 'sign').
 * Divide-by-zero and signed INT_MIN / -1 never trap: the divisor is
 * patched to a safe value and the condition is reported via OV/OV32/SO
 * when compute_ov is set.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = 1 on overflow: INT_MIN / -1, or divide by zero. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* On overflow replace the divisor with t2 (== 1) so the host
         * division cannot fault. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = 1 on divide by zero; same divisor-patching trick. */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions */
/* Expand one divw-family handler on top of gen_op_arith_divw. */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu  divwu.  divwuo  divwuo.   */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo.   */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
1847 
/* div[wd]eu[o][.] */
/*
 * Extended divide, implemented out of line in a helper; the helper's
 * last argument selects whether XER overflow state is updated.
 */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_constant_i32(compute_ov);                               \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], tcg_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1864 
1865 #if defined(TARGET_PPC64)
/*
 * Generate a 64-bit divide (divd family); the 64-bit twin of
 * gen_op_arith_divw.  Invalid cases are division by zero and, when
 * signed, INT64_MIN / -1; t2 flags them and the divisor is replaced
 * so the host operation cannot trap.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Copy the operands: arg1/arg2 may alias ret. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (t0 == INT64_MIN && t1 == -1) || (t1 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* If invalid, substitute the divisor with t2 (== 1). */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* Unsigned: only division by zero is invalid. */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        /* XER.SO is sticky. */
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1903 
/* Expand one divd-family handler on top of gen_op_arith_divd. */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo.   */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo.   */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

/* divdeu[o][.]  divde[o][.]: 64-bit extended divide, via helpers */
GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1922 #endif
1923 
/*
 * Generate a 32-bit modulo (moduw/modsw).  For division by zero and,
 * when signed, INT_MIN % -1, the divisor is replaced with a harmless
 * value so the host operation cannot trap (the architected result is
 * undefined in these cases).  No XER or CR0 updates here.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = (t0 == INT_MIN && t1 == -1) || (t1 == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* If invalid, substitute the divisor with t2 (== 1). */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        tcg_gen_ext_i32_tl(ret, t3);
    } else {
        TCGv_i32 t2 = tcg_constant_i32(1);
        TCGv_i32 t3 = tcg_constant_i32(0);
        /* Replace a zero divisor with 1 to avoid a host trap. */
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t0, t0, t1);
        tcg_gen_extu_i32_tl(ret, t0);
    }
}
1952 
/* Expand one 32-bit modulo handler on top of gen_op_arith_modw. */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
static void glue(gen_, name)(DisasContext *ctx)                             \
{                                                                           \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
                      sign);                                                \
}

/* moduw: unsigned modulo; modsw: signed modulo */
GEN_INT_ARITH_MODW(moduw, 0x08, 0);
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1963 
1964 #if defined(TARGET_PPC64)
/*
 * Generate a 64-bit modulo (modud/modsd); the 64-bit twin of
 * gen_op_arith_modw.  Invalid divisors are substituted so the host
 * operation cannot trap (the architected result is undefined there).
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Copy the operands: arg1/arg2 may alias ret. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = (t0 == INT64_MIN && t1 == -1) || (t1 == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* If invalid, substitute the divisor with t2 (== 1). */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
    } else {
        TCGv_i64 t2 = tcg_constant_i64(1);
        TCGv_i64 t3 = tcg_constant_i64(0);
        /* Replace a zero divisor with 1 to avoid a host trap. */
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
    }
}
1991 
1992 #define GEN_INT_ARITH_MODD(name, opc3, sign)                            \
1993 static void glue(gen_, name)(DisasContext *ctx)                           \
1994 {                                                                         \
1995   gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1996                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1997                     sign);                                                \
1998 }
1999 
2000 GEN_INT_ARITH_MODD(modud, 0x08, 0);
2001 GEN_INT_ARITH_MODD(modsd, 0x18, 1);
2002 #endif
2003 
2004 /* mulhw  mulhw. */
2005 static void gen_mulhw(DisasContext *ctx)
2006 {
2007     TCGv_i32 t0 = tcg_temp_new_i32();
2008     TCGv_i32 t1 = tcg_temp_new_i32();
2009 
2010     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
2011     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
2012     tcg_gen_muls2_i32(t0, t1, t0, t1);
2013     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
2014     if (unlikely(Rc(ctx->opcode) != 0)) {
2015         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2016     }
2017 }
2018 
2019 /* mulhwu  mulhwu.  */
2020 static void gen_mulhwu(DisasContext *ctx)
2021 {
2022     TCGv_i32 t0 = tcg_temp_new_i32();
2023     TCGv_i32 t1 = tcg_temp_new_i32();
2024 
2025     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
2026     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
2027     tcg_gen_mulu2_i32(t0, t1, t0, t1);
2028     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
2029     if (unlikely(Rc(ctx->opcode) != 0)) {
2030         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2031     }
2032 }
2033 
2034 /* mullw  mullw. */
2035 static void gen_mullw(DisasContext *ctx)
2036 {
2037 #if defined(TARGET_PPC64)
2038     TCGv_i64 t0, t1;
2039     t0 = tcg_temp_new_i64();
2040     t1 = tcg_temp_new_i64();
2041     tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
2042     tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
2043     tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
2044 #else
2045     tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2046                     cpu_gpr[rB(ctx->opcode)]);
2047 #endif
2048     if (unlikely(Rc(ctx->opcode) != 0)) {
2049         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2050     }
2051 }
2052 
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    /* Full 64-bit signed product: t0 = low word, t1 = high word. */
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    /* On 64-bit targets rD keeps the whole 64-bit product. */
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /* OV iff the high word is not the sign-extension of the low word. */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    /* XER.SO is sticky. */
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2080 
2081 /* mulli */
2082 static void gen_mulli(DisasContext *ctx)
2083 {
2084     tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2085                     SIMM(ctx->opcode));
2086 }
2087 
2088 #if defined(TARGET_PPC64)
2089 /* mulhd  mulhd. */
2090 static void gen_mulhd(DisasContext *ctx)
2091 {
2092     TCGv lo = tcg_temp_new();
2093     tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2094                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2095     if (unlikely(Rc(ctx->opcode) != 0)) {
2096         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2097     }
2098 }
2099 
2100 /* mulhdu  mulhdu. */
2101 static void gen_mulhdu(DisasContext *ctx)
2102 {
2103     TCGv lo = tcg_temp_new();
2104     tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2105                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2106     if (unlikely(Rc(ctx->opcode) != 0)) {
2107         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2108     }
2109 }
2110 
2111 /* mulld  mulld. */
2112 static void gen_mulld(DisasContext *ctx)
2113 {
2114     tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2115                    cpu_gpr[rB(ctx->opcode)]);
2116     if (unlikely(Rc(ctx->opcode) != 0)) {
2117         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2118     }
2119 }
2120 
/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Full 128-bit signed product: t0 = low, t1 = high. */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /* OV iff the high half is not the sign-extension of the low half. */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    /* XER.SO is sticky. */
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2142 #endif
2143 
/* Common subf function */
/*
 * ret = ~arg1 + arg2 [+ CA], i.e. arg2 - arg1 [+ CA - 1].
 *
 * add_ca      - include XER.CA in the sum (subfe/subfme/subfze)
 * compute_ca  - update XER.CA (and CA32 on ISA v3.00)
 * compute_ov  - update XER.OV/OV32 and the sticky XER.SO
 * compute_rc0 - update CR0 from the result
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    /* Use a temp when flags are computed, in case ret aliases an input. */
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Two chained add2 ops accumulate both operands' carry-outs. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_constant_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
        } else {
            /* No carry-in: CA is simply arg2 >= arg1 (unsigned). */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
    }
}
/* Sub functions with Two operands functions */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_constant_tl(const_val);                                     \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo.  */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2246 
2247 /* subfic */
2248 static void gen_subfic(DisasContext *ctx)
2249 {
2250     TCGv c = tcg_constant_tl(SIMM(ctx->opcode));
2251     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2252                       c, 0, 1, 0, 0);
2253 }
2254 
2255 /* neg neg. nego nego. */
2256 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2257 {
2258     TCGv zero = tcg_constant_tl(0);
2259     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2260                       zero, 0, 0, compute_ov, Rc(ctx->opcode));
2261 }
2262 
2263 static void gen_neg(DisasContext *ctx)
2264 {
2265     tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2266     if (unlikely(Rc(ctx->opcode))) {
2267         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2268     }
2269 }
2270 
2271 static void gen_nego(DisasContext *ctx)
2272 {
2273     gen_op_arith_neg(ctx, 1);
2274 }
2275 
2276 /***                            Integer logical                            ***/
2277 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
2278 static void glue(gen_, name)(DisasContext *ctx)                               \
2279 {                                                                             \
2280     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
2281        cpu_gpr[rB(ctx->opcode)]);                                             \
2282     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2283         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2284 }
2285 
2286 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
2287 static void glue(gen_, name)(DisasContext *ctx)                               \
2288 {                                                                             \
2289     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
2290     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2291         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2292 }
2293 
/* and & and. */
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
/* andc & andc. (rA = rS & ~rB) */
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2298 
2299 /* andi. */
2300 static void gen_andi_(DisasContext *ctx)
2301 {
2302     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2303                     UIMM(ctx->opcode));
2304     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2305 }
2306 
2307 /* andis. */
2308 static void gen_andis_(DisasContext *ctx)
2309 {
2310     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2311                     UIMM(ctx->opcode) << 16);
2312     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2313 }
2314 
2315 /* cntlzw */
2316 static void gen_cntlzw(DisasContext *ctx)
2317 {
2318     TCGv_i32 t = tcg_temp_new_i32();
2319 
2320     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2321     tcg_gen_clzi_i32(t, t, 32);
2322     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2323 
2324     if (unlikely(Rc(ctx->opcode) != 0)) {
2325         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2326     }
2327 }
2328 
2329 /* cnttzw */
2330 static void gen_cnttzw(DisasContext *ctx)
2331 {
2332     TCGv_i32 t = tcg_temp_new_i32();
2333 
2334     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2335     tcg_gen_ctzi_i32(t, t, 32);
2336     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2337 
2338     if (unlikely(Rc(ctx->opcode) != 0)) {
2339         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2340     }
2341 }
2342 
/* eqv & eqv. (rA = ~(rS ^ rB)) */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. (sign-extend byte) */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. (sign-extend halfword) */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2353 
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    /*
     * Write 0 to CPUState::halted.  tcg_env points at the env member
     * inside PowerPCCPU, so the negative offset reaches back to the
     * containing CPU structure.
     */
    TCGv_i32 t0 = tcg_constant_i32(0);
    tcg_gen_st_i32(t0, tcg_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2365 
/* or & or. */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for the common or/mr case (registers not all equal) */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* or rx,ry,ry is mr: a plain register move. */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        /* "or rx,rx,rx" encodes a thread-priority hint; map rx to PPR. */
        int prio = 0;

        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            /* Replace only the priority field of SPR_PPR. */
            gen_load_spr(t0, SPR_PPR);
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. (rA = rS | ~rB) */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2453 
2454 /* xor & xor. */
2455 static void gen_xor(DisasContext *ctx)
2456 {
2457     /* Optimisation for "set to zero" case */
2458     if (rS(ctx->opcode) != rB(ctx->opcode)) {
2459         tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2460                        cpu_gpr[rB(ctx->opcode)]);
2461     } else {
2462         tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2463     }
2464     if (unlikely(Rc(ctx->opcode) != 0)) {
2465         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2466     }
2467 }
2468 
2469 /* ori */
2470 static void gen_ori(DisasContext *ctx)
2471 {
2472     target_ulong uimm = UIMM(ctx->opcode);
2473 
2474     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2475         return;
2476     }
2477     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2478 }
2479 
2480 /* oris */
2481 static void gen_oris(DisasContext *ctx)
2482 {
2483     target_ulong uimm = UIMM(ctx->opcode);
2484 
2485     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2486         /* NOP */
2487         return;
2488     }
2489     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2490                    uimm << 16);
2491 }
2492 
2493 /* xori */
2494 static void gen_xori(DisasContext *ctx)
2495 {
2496     target_ulong uimm = UIMM(ctx->opcode);
2497 
2498     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2499         /* NOP */
2500         return;
2501     }
2502     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2503 }
2504 
2505 /* xoris */
2506 static void gen_xoris(DisasContext *ctx)
2507 {
2508     target_ulong uimm = UIMM(ctx->opcode);
2509 
2510     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2511         /* NOP */
2512         return;
2513     }
2514     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2515                     uimm << 16);
2516 }
2517 
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    /* Per-byte population count, done out of line in a helper. */
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2523 
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* Per-32-bit-word popcount needs a helper on 64-bit targets. */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* On 32-bit targets the register is a single word: plain ctpop. */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2532 
#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    /* Whole-doubleword population count maps directly to ctpop. */
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
2540 
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold the bytes of each word so bit 0 is the bytes' parity. */
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Keep bit 0 of each word (the high word only exists on 64-bit). */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
}
2553 
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold all eight bytes so bit 0 holds their combined parity. */
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
}
#endif
2570 
#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    /* Bit-permute doubleword, done out of line in a helper. */
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
2579 
#if defined(TARGET_PPC64)
/* extsw & extsw. (sign-extend word) */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2583 
2584 /* cntlzd */
2585 static void gen_cntlzd(DisasContext *ctx)
2586 {
2587     tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2588     if (unlikely(Rc(ctx->opcode) != 0)) {
2589         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2590     }
2591 }
2592 
2593 /* cnttzd */
2594 static void gen_cnttzd(DisasContext *ctx)
2595 {
2596     tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2597     if (unlikely(Rc(ctx->opcode) != 0)) {
2598         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2599     }
2600 }
2601 
/* darn */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    if (l > 2) {
        /* Reserved L values: deliver the all-ones failure value. */
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        /* Random-number generation counts as I/O for icount purposes. */
        translator_io_start(&ctx->base);
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
2619 #endif
2620 
2621 /***                             Integer rotate                            ***/
2622 
/* rlwimi & rlwimi. */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Mask aligned exactly with the rotation: a plain bitfield deposit. */
    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() works on 64-bit bit positions; use bits 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        /* mb > me wraps the mask through bit 32 and out of the low word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* Rotate in 32 bits; the mask discards the high bits anyway. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the low word so a 64-bit rotate emulates rotlw. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Insert the rotated value into rA under the mask. */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2673 
/* rlwinm & rlwinm. */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* Shift-left-and-mask: deposit into a zeroed destination. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* Shift-right-and-mask: a plain bitfield extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() works on 64-bit bit positions; use bits 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        /* mb > me wraps the mask through bit 32 and out of the low word. */
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                /* Rotate in 32 bits, then apply the mask. */
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the low word so a 64-bit rotate emulates rotlw. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2726 
/*
 * rlwnm & rlwnm. - Rotate Left Word then AND with Mask: like rlwinm
 * but the rotate count comes from the low bits of rB at run time.
 */
static void gen_rlwnm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* Bias mb/me so MASK() builds the word mask in the low 32 bits. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    if (mask > 0xffffffffu) {
        /* The mask does not fit in 32 bits (wrap-around mb > me form). */
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* Rotate in 32-bit arithmetic by the low five bits of rB. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
    } else {
#if defined(TARGET_PPC64)
        /* Duplicate the low word so a 64-bit rotate models the 32-bit
           rotate under a mask spanning the high word. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
#else
        /* On a 32-bit target the mask always fits in 32 bits. */
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2774 
#if defined(TARGET_PPC64)
/*
 * Expand the two opcode variants (name0/name1) of a 64-bit insn whose
 * implementation takes one extra opcode bit as an argument.
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/*
 * Expand the four opcode variants (name0..name3) of a 64-bit insn whose
 * implementation takes two extra opcode bits as arguments.
 */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2806 
/*
 * Common backend for the rldi* immediate doubleword rotates: rotate rS
 * left by sh and AND with MASK(mb, me) into rA, using deposit/extract
 * fast paths when the mask shape allows it.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;     /* width of the selected bit field */
    int rsh = (64 - sh) & 63;  /* equivalent right-rotate amount */

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* The field lands exactly at bit sh: shift left + zero fill. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-aligned mask: a plain bit-field extract suffices. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2826 
2827 /* rldicl - rldicl. */
2828 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2829 {
2830     uint32_t sh, mb;
2831 
2832     sh = SH(ctx->opcode) | (shn << 5);
2833     mb = MB(ctx->opcode) | (mbn << 5);
2834     gen_rldinm(ctx, mb, 63, sh);
2835 }
2836 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2837 
2838 /* rldicr - rldicr. */
2839 static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
2840 {
2841     uint32_t sh, me;
2842 
2843     sh = SH(ctx->opcode) | (shn << 5);
2844     me = MB(ctx->opcode) | (men << 5);
2845     gen_rldinm(ctx, 0, me, sh);
2846 }
2847 GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2848 
2849 /* rldic - rldic. */
2850 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2851 {
2852     uint32_t sh, mb;
2853 
2854     sh = SH(ctx->opcode) | (shn << 5);
2855     mb = MB(ctx->opcode) | (mbn << 5);
2856     gen_rldinm(ctx, mb, 63 - sh, sh);
2857 }
2858 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2859 
2860 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2861 {
2862     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2863     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2864     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2865     TCGv t0;
2866 
2867     t0 = tcg_temp_new();
2868     tcg_gen_andi_tl(t0, t_rb, 0x3f);
2869     tcg_gen_rotl_tl(t_ra, t_rs, t0);
2870 
2871     tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2872     if (unlikely(Rc(ctx->opcode) != 0)) {
2873         gen_set_Rc0(ctx, t_ra);
2874     }
2875 }
2876 
2877 /* rldcl - rldcl. */
2878 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2879 {
2880     uint32_t mb;
2881 
2882     mb = MB(ctx->opcode) | (mbn << 5);
2883     gen_rldnm(ctx, mb, 63);
2884 }
2885 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2886 
2887 /* rldcr - rldcr. */
2888 static inline void gen_rldcr(DisasContext *ctx, int men)
2889 {
2890     uint32_t me;
2891 
2892     me = MB(ctx->opcode) | (men << 5);
2893     gen_rldnm(ctx, 0, me);
2894 }
2895 GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2896 
/*
 * rldimi - rldimi. - Rotate Left Doubleword Immediate then Mask Insert:
 * rotate rS left by SH and insert the MB..(63-SH) field into rA.
 */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);  /* split 6-bit SH field */
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);  /* split 6-bit MB field */
    uint32_t me = 63 - sh;                       /* ME is implied by SH */

    if (mb <= me) {
        /* Contiguous mask: a single deposit of the field into rA. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrap-around mask: rotate, mask, and merge by hand. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2922 #endif
2923 
2924 /***                             Integer shift                             ***/
2925 
/* slw & slw. - Shift Left Word */
static void gen_slw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Replicate rB's 0x20 bit across the word via shl + arithmetic shr. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    /* Shift by the remaining low five bits of rB. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    /* The result is a 32-bit quantity; clear the high word. */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2949 
/* sraw & sraw. - Shift Right Algebraic Word, delegated to a helper. */
static void gen_sraw(DisasContext *ctx)
{
    gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2959 
/* srawi & srawi. - Shift Right Algebraic Word Immediate */
static void gen_srawi(DisasContext *ctx)
{
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: just sign-extend; CA (and CA32) are cleared. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /*
         * CA is set iff the (sign-extended) source is negative and any
         * 1 bits are shifted out: AND the low sh bits with the
         * replicated sign bit, then compare against zero.
         */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2989 
/* srw & srw. - Shift Right Word (logical) */
static void gen_srw(DisasContext *ctx)
{
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Replicate rB's 0x20 bit across the word via shl + arithmetic shr. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical word shift: clear the high word before shifting right. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    /* Shift by the remaining low five bits of rB. */
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
3013 
3014 #if defined(TARGET_PPC64)
3015 /* sld & sld. */
3016 static void gen_sld(DisasContext *ctx)
3017 {
3018     TCGv t0, t1;
3019 
3020     t0 = tcg_temp_new();
3021     /* AND rS with a mask that is 0 when rB >= 0x40 */
3022     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
3023     tcg_gen_sari_tl(t0, t0, 0x3f);
3024     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
3025     t1 = tcg_temp_new();
3026     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
3027     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
3028     if (unlikely(Rc(ctx->opcode) != 0)) {
3029         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3030     }
3031 }
3032 
/* srad & srad. - Shift Right Algebraic Doubleword, delegated to a helper. */
static void gen_srad(DisasContext *ctx)
{
    gen_helper_srad(cpu_gpr[rA(ctx->opcode)], tcg_env,
                    cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
/* sradi & sradi. - Shift Right Algebraic Doubleword Immediate */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);   /* n supplies sh[5] */
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Shift by zero: plain copy; CA (and CA32) are cleared. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /*
         * CA is set iff the source is negative and any 1 bits are
         * shifted out: AND the low sh bits with the replicated sign
         * bit, then compare against zero.
         */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
3070 
/* sradi with sh[5] = 0 */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

/* sradi with sh[5] = 1 */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
3080 
/*
 * extswsli & extswsli. - Extend Sign Word and Shift Left Immediate:
 * sign-extend the low word of rS, then shift left by the split 6-bit
 * immediate.
 */
static inline void gen_extswsli(DisasContext *ctx, int n)
{
    int sh = SH(ctx->opcode) + (n << 5);   /* n supplies sh[5] */
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];

    tcg_gen_ext32s_tl(dst, src);
    tcg_gen_shli_tl(dst, dst, sh);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
3094 
/* extswsli with sh[5] = 0 */
static void gen_extswsli0(DisasContext *ctx)
{
    gen_extswsli(ctx, 0);
}

/* extswsli with sh[5] = 1 */
static void gen_extswsli1(DisasContext *ctx)
{
    gen_extswsli(ctx, 1);
}
3104 
3105 /* srd & srd. */
3106 static void gen_srd(DisasContext *ctx)
3107 {
3108     TCGv t0, t1;
3109 
3110     t0 = tcg_temp_new();
3111     /* AND rS with a mask that is 0 when rB >= 0x40 */
3112     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
3113     tcg_gen_sari_tl(t0, t0, 0x3f);
3114     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
3115     t1 = tcg_temp_new();
3116     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
3117     tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
3118     if (unlikely(Rc(ctx->opcode) != 0)) {
3119         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3120     }
3121 }
3122 #endif
3123 
3124 /***                           Addressing modes                            ***/
3125 /* Register indirect with immediate index : EA = (rA|0) + SIMM */
3126 static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
3127                                       target_long maskl)
3128 {
3129     target_long simm = SIMM(ctx->opcode);
3130 
3131     simm &= ~maskl;
3132     if (rA(ctx->opcode) == 0) {
3133         if (NARROW_MODE(ctx)) {
3134             simm = (uint32_t)simm;
3135         }
3136         tcg_gen_movi_tl(EA, simm);
3137     } else if (likely(simm != 0)) {
3138         tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
3139         if (NARROW_MODE(ctx)) {
3140             tcg_gen_ext32u_tl(EA, EA);
3141         }
3142     } else {
3143         if (NARROW_MODE(ctx)) {
3144             tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3145         } else {
3146             tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3147         }
3148     }
3149 }
3150 
3151 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3152 {
3153     if (rA(ctx->opcode) == 0) {
3154         if (NARROW_MODE(ctx)) {
3155             tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3156         } else {
3157             tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3158         }
3159     } else {
3160         tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3161         if (NARROW_MODE(ctx)) {
3162             tcg_gen_ext32u_tl(EA, EA);
3163         }
3164     }
3165 }
3166 
3167 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3168 {
3169     if (rA(ctx->opcode) == 0) {
3170         tcg_gen_movi_tl(EA, 0);
3171     } else if (NARROW_MODE(ctx)) {
3172         tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3173     } else {
3174         tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3175     }
3176 }
3177 
/* ret = arg1 + val, truncated to 32 bits in narrow mode. */
static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                target_long val)
{
    tcg_gen_addi_tl(ret, arg1, val);
    if (NARROW_MODE(ctx)) {
        tcg_gen_ext32u_tl(ret, ret);
    }
}
3186 
/*
 * Raise an alignment interrupt for instructions that are not available
 * in little-endian mode, encoding the relevant opcode fields into the
 * error code.
 */
static inline void gen_align_no_le(DisasContext *ctx)
{
    gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
                      (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
}
3192 
3193 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3194 {
3195     TCGv ea = tcg_temp_new();
3196     if (ra) {
3197         tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3198     } else {
3199         tcg_gen_mov_tl(ea, displ);
3200     }
3201     if (NARROW_MODE(ctx)) {
3202         tcg_gen_ext32u_tl(ea, ea);
3203     }
3204     return ea;
3205 }
3206 
3207 /***                             Integer load                              ***/
3208 #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
3209 #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
3210 
3211 #define GEN_QEMU_LOAD_TL(ldop, op)                                      \
3212 static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
3213                                   TCGv val,                             \
3214                                   TCGv addr)                            \
3215 {                                                                       \
3216     tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
3217 }
3218 
3219 GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
3220 GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
3221 GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
3222 GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
3223 GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
3224 
3225 GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
3226 GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
3227 
3228 #define GEN_QEMU_LOAD_64(ldop, op)                                  \
3229 static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
3230                                              TCGv_i64 val,          \
3231                                              TCGv addr)             \
3232 {                                                                   \
3233     tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
3234 }
3235 
3236 GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
3237 GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
3238 GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
3239 GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
3240 GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))
3241 
3242 #if defined(TARGET_PPC64)
3243 GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
3244 #endif
3245 
/* Emit gen_qemu_<stop>(): store a target-long TCG value to addr. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed variants. */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

/* Emit gen_qemu_<stop>_i64(): store a 64-bit TCG value to addr. */
#define GEN_QEMU_STORE_64(stop, op)                               \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
                                              TCGv_i64 val,       \
                                              TCGv addr)          \
{                                                                 \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
3279 
/* Emit gen_<name>x(): indexed load of rD from EA = (rA|0) + rB. */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
}

#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Hypervisor-real-mode-only variant (cache-inhibited loads below). */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Emit gen_<name>epx(): supervisor-only external-PID load going
 * through the dedicated PPC_TLB_EPID_LOAD mmu index.
 */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
3322 
3323 /***                              Integer store                            ***/
3324 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
3325 static void glue(gen_, name##x)(DisasContext *ctx)                            \
3326 {                                                                             \
3327     TCGv EA;                                                                  \
3328     chk(ctx);                                                                 \
3329     gen_set_access_type(ctx, ACCESS_INT);                                     \
3330     EA = tcg_temp_new();                                                      \
3331     gen_addr_reg_index(ctx, EA);                                              \
3332     gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
3333 }
3334 #define GEN_STX(name, stop, opc2, opc3, type)                                 \
3335     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
3336 
3337 #define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
3338     GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
3339 
3340 #define GEN_STEPX(name, stop, opc2, opc3)                                     \
3341 static void glue(gen_, name##epx)(DisasContext *ctx)                          \
3342 {                                                                             \
3343     TCGv EA;                                                                  \
3344     CHK_SV(ctx);                                                              \
3345     gen_set_access_type(ctx, ACCESS_INT);                                     \
3346     EA = tcg_temp_new();                                                      \
3347     gen_addr_reg_index(ctx, EA);                                              \
3348     tcg_gen_qemu_st_tl(                                                       \
3349         cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
3350 }
3351 
3352 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
3353 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
3354 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
3355 #if defined(TARGET_PPC64)
3356 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
3357 #endif
3358 
3359 #if defined(TARGET_PPC64)
3360 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
3361 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
3362 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
3363 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
3364 #endif
3365 /***                Integer load and store with byte reverse               ***/
3366 
3367 /* lhbrx */
3368 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
3369 
3370 /* lwbrx */
3371 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
3372 
3373 #if defined(TARGET_PPC64)
3374 /* ldbrx */
3375 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
3376 /* stdbrx */
3377 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
3378 #endif  /* TARGET_PPC64 */
3379 
3380 /* sthbrx */
3381 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
3382 /* stwbrx */
3383 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3384 
3385 /***                    Integer load and store multiple                    ***/
3386 
/* lmw - Load Multiple Word, delegated to a helper. */
static void gen_lmw(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1;

    /* Not available in little-endian mode: raise an alignment fault. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    t1 = tcg_constant_i32(rD(ctx->opcode));   /* first register to load */
    gen_addr_imm_index(ctx, t0, 0);
    gen_helper_lmw(tcg_env, t0, t1);
}
3403 
3404 /* stmw */
3405 static void gen_stmw(DisasContext *ctx)
3406 {
3407     TCGv t0;
3408     TCGv_i32 t1;
3409 
3410     if (ctx->le_mode) {
3411         gen_align_no_le(ctx);
3412         return;
3413     }
3414     gen_set_access_type(ctx, ACCESS_INT);
3415     t0 = tcg_temp_new();
3416     t1 = tcg_constant_i32(rS(ctx->opcode));
3417     gen_addr_imm_index(ctx, t0, 0);
3418     gen_helper_stmw(tcg_env, t0, t1);
3419 }
3420 
3421 /***                    Integer load and store strings                     ***/
3422 
3423 /* lswi */
3424 /*
3425  * PowerPC32 specification says we must generate an exception if rA is
3426  * in the range of registers to be loaded.  In an other hand, IBM says
3427  * this is valid, but rA won't be loaded.  For now, I'll follow the
3428  * spec...
3429  */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);      /* byte count; 0 encodes 32 */
    int start = rD(ctx->opcode);   /* first register to load */
    int ra = rA(ctx->opcode);
    int nr;                        /* number of registers touched */

    /* Not available in little-endian mode: raise an alignment fault. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    nr = DIV_ROUND_UP(nb, 4);
    /* Per the spec (see comment above), rA must not be overwritten. */
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(start);
    gen_helper_lsw(tcg_env, t0, t1, t2);
}
3458 
/* lswx - Load String Word Indexed, delegated to a helper. */
static void gen_lswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2, t3;

    /* Not available in little-endian mode: raise an alignment fault. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* Register numbers; the byte count is resolved by the helper. */
    t1 = tcg_constant_i32(rD(ctx->opcode));
    t2 = tcg_constant_i32(rA(ctx->opcode));
    t3 = tcg_constant_i32(rB(ctx->opcode));
    gen_helper_lswx(tcg_env, t0, t1, t2, t3);
}
3477 
/* stswi - Store String Word Immediate, delegated to a helper. */
static void gen_stswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);   /* byte count; 0 encodes 32 */

    /* Not available in little-endian mode: raise an alignment fault. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0) {
        nb = 32;
    }
    t1 = tcg_constant_i32(nb);
    t2 = tcg_constant_i32(rS(ctx->opcode));   /* first register to store */
    gen_helper_stsw(tcg_env, t0, t1, t2);
}
3499 
/* stswx - Store String Word Indexed, delegated to a helper. */
static void gen_stswx(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;

    /* Not available in little-endian mode: raise an alignment fault. */
    if (ctx->le_mode) {
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* The byte count is taken from the low 7 bits of XER. */
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_constant_i32(rS(ctx->opcode));   /* first register to store */
    gen_helper_stsw(tcg_env, t0, t1, t2);
}
3519 
3520 /***                        Memory synchronisation                         ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has an eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            /* Store-forwarding barrier: only store-vs-load ordering. */
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3572 
#if !defined(CONFIG_USER_ONLY)
/*
 * Emit a runtime check of env->tlb_need_flush and, when it is non-zero,
 * call the helper that performs the deferred (lazy) TLB flush.  With
 * global == true the global-flush helper is used, otherwise only the
 * local one.  No-op when this CPU model does not defer TLB flushes.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, tcg_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Skip the helper call entirely when no flush is pending. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(tcg_env);
    } else {
        gen_helper_check_tlb_flush_local(tcg_env);
    }
    gen_set_label(l);
}
#else
/* User-mode emulation has no softmmu TLB to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3596 
/* isync */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    /* isync acts as a full barrier here. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    /* Context-synchronising: exit the TB so updated state takes effect. */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3610 
/* Access size in bytes selected by a MemOp's MO_SIZE field. */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))
3612 
/*
 * Common code for the load-and-reserve (larx) family: load RD from
 * (RA|0)+RB with an aligned access and record the reservation address,
 * length and loaded value for a later store-conditional.
 */
static void gen_load_locked(DisasContext *ctx, MemOp memop)
{
    TCGv gpr = cpu_gpr[rD(ctx->opcode)];
    TCGv t0 = tcg_temp_new();

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
    tcg_gen_mov_tl(cpu_reserve, t0);
    tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop));
    tcg_gen_mov_tl(cpu_reserve_val, gpr);
}
3625 
/* Expand one load-and-reserve instruction via gen_load_locked. */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3636 
/*
 * Non-parallel expansion of the "fetch and increment/decrement
 * bounded/equal" lwat/ldat operations: load the word at EA and the word
 * after it, store word+addend back to EA if cond holds between the two,
 * and set RT to the old value on success or to 1<<(s*8-1) otherwise.
 */
static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
                                      TCGv EA, TCGCond cond, int addend)
{
    TCGv t = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    TCGv u = tcg_temp_new();

    tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
    tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
    tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
    tcg_gen_addi_tl(u, t, addend);

    /* E.g. for fetch and increment bounded... */
    /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
    tcg_gen_movcond_tl(cond, u, t, t2, u, t);
    tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);

    /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
    tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
    tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
}
3658 
/*
 * Expand the lwat/ldat "atomic load and do" instructions.  The FC field
 * selects the operation; RT receives the old memory value, the source
 * operand is taken from RT+1 (and RT+2 for compare-and-swap-not-equal).
 * Variants without a single-op TCG equivalent fall back to serial
 * execution when running in parallel mode.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* For 32-bit ops on a 64-bit CPU, compare only the low word. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            /* Store RT+2 when the old value differs from RT+1. */
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }

    if (need_serial) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(tcg_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3755 
/* lwat - word atomic load-and-operate */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat - doubleword atomic load-and-operate */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3767 
/*
 * Expand the stwat/stdat "atomic store and do" instructions.  The FC
 * field selects the operation; RS supplies the source operand and the
 * value produced by the read-modify-write is discarded.
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(tcg_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store src to both words only if the two words are equal. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
}
3830 
/* stwat - word atomic store-and-operate */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat - doubleword atomic store-and-operate */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3842 
/*
 * Common code for the store-conditional (stcx.) family.  The store only
 * succeeds when EA matches the recorded reservation address and length;
 * the value check is done with a cmpxchg against the value observed by
 * the matching larx.  CR0 is set to 0b00<EQ><SO>, EQ reporting success.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *lfail;
    TCGv EA;
    TCGv cr0;
    TCGv t0;
    int rs = rS(ctx->opcode);

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();
    t0 = tcg_temp_new();

    /* cr0 starts as SO only; the EQ bit is OR-ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, memop_size(memop), lfail);

    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[rs], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* The reservation is killed whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3873 
/* Expand one store-conditional instruction via gen_conditional_store. */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))
3889 
/* lqarx - quadword load and reserve */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;
    TCGv_i128 t16;

    /* RD must be even and must not overlap RA or RB. */
    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    t16 = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t16, EA, ctx->mem_idx, DEF_MEMOP(MO_128 | MO_ALIGN));
    tcg_gen_extr_i128_i64(lo, hi, t16);

    /* Record a 16-byte reservation for the matching stqcx. */
    tcg_gen_mov_tl(cpu_reserve, EA);
    tcg_gen_movi_tl(cpu_reserve_length, 16);
    tcg_gen_st_tl(hi, tcg_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, tcg_env, offsetof(CPUPPCState, reserve_val2));
}
3920 
/* stqcx. - quadword store conditional */
static void gen_stqcx_(DisasContext *ctx)
{
    TCGLabel *lfail;
    TCGv EA, t0, t1;
    TCGv cr0;
    TCGv_i128 cmp, val;
    int rs = rS(ctx->opcode);

    /* RS must be an even register. */
    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    lfail = gen_new_label();
    EA = tcg_temp_new();
    cr0 = tcg_temp_new();

    /* cr0 starts as SO only; the EQ bit is OR-ed in on success. */
    tcg_gen_mov_tl(cr0, cpu_so);
    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, EA);
    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lfail);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_reserve_length, 16, lfail);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);

    /* Note that the low part is always in RS+1, even in LE mode.  */
    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);

    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
                                DEF_MEMOP(MO_128 | MO_ALIGN));

    /* Success iff the value read back equals the reserved value. */
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_extr_i128_i64(t1, t0, val);

    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
    tcg_gen_or_tl(t0, t0, t1);

    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(cr0, cr0, t0);

    gen_set_label(lfail);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cr0);
    /* The reservation is killed whether the store succeeded or not. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3972 #endif /* defined(TARGET_PPC64) */
3973 
/* sync */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /* lwsync (L=1) only orders load/load, load/store and store/store. */
    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3998 
/* wait - decode the WC (and PL) fields per the ISA level, then either
 * halt the CPU (WC=0) or treat the instruction as a no-op. */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        TCGv_i32 t0 = tcg_constant_i32(1);
        tcg_gen_st_i32(t0, tcg_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, which could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
4087 
#if defined(TARGET_PPC64)
/* doze - enter power-saving mode; hypervisor privileged. */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_DOZE);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4104 
/* nap - enter power-saving mode; hypervisor privileged. */
static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_NAP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4120 
/* stop - enter power-saving mode; hypervisor privileged. */
static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_STOP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4136 
/* sleep - enter power-saving mode; hypervisor privileged. */
static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4152 
/* rvwinkle - enter power-saving mode; hypervisor privileged. */
static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    t = tcg_constant_i32(PPC_PM_RVWINKLE);
    gen_helper_pminsn(tcg_env, t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
#endif /* #if defined(TARGET_PPC64) */
4169 
/* Record NIP in the Come-From Address Register, when the CPU has one. */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4178 
#if defined(TARGET_PPC64)
/*
 * Account the TB's executed instructions (ctx->base.num_insns) to the
 * PMU before leaving the TB.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    TCGLabel *l;
    TCGv t0;

    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflow happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    translator_io_start(&ctx->base);

    /* Avoid helper calls when only PMC5-6 are enabled. */
    if (!ctx->pmc_other) {
        l = gen_new_label();
        t0 = tcg_temp_new();

        gen_load_spr(t0, SPR_POWER_PMC5);
        tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
        gen_store_spr(SPR_POWER_PMC5, t0);
        /* Check for overflow, if it's enabled */
        if (ctx->mmcr0_pmcjce) {
            tcg_gen_brcondi_tl(TCG_COND_LT, t0, PMC_COUNTER_NEGATIVE_VAL, l);
            gen_helper_handle_pmc5_overflow(tcg_env);
        }

        gen_set_label(l);
    } else {
        gen_helper_insns_inc(tcg_env, tcg_constant_i32(ctx->base.num_insns));
    }
  #else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);
  #endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* PMU instruction counting is only implemented for 64-bit PowerPC. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4239 
4240 static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
4241 {
4242     if (unlikely(ctx->singlestep_enabled)) {
4243         return false;
4244     }
4245     return translator_use_goto_tb(&ctx->base, dest);
4246 }
4247 
4248 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4249 {
4250     if (unlikely(ctx->singlestep_enabled)) {
4251         gen_debug_exception(ctx, false);
4252     } else {
4253         /*
4254          * tcg_gen_lookup_and_goto_ptr will exit the TB if
4255          * CF_NO_GOTO_PTR is set. Count insns now.
4256          */
4257         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4258             pmu_count_insns(ctx);
4259         }
4260 
4261         tcg_gen_lookup_and_goto_ptr();
4262     }
4263 }
4264 
4265 /***                                Branch                                 ***/
/*
 * Emit a branch to DEST, chaining TBs directly when allowed and falling
 * back to an indirect lookup otherwise.  n selects the goto_tb slot
 * (callers use 0 for the taken path, 1 for the fallthrough path).
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        /* Direct chaining exits the TB: flush the PMU insn count first. */
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
4281 
4282 static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
4283 {
4284     if (NARROW_MODE(ctx)) {
4285         nip = (uint32_t)nip;
4286     }
4287     tcg_gen_movi_tl(cpu_lr, nip);
4288 }
4289 
/* b ba bl bla */
static void gen_b(DisasContext *ctx)
{
    target_ulong li, target;

    /* sign extend LI (the sign bit is 0x02000000) */
    li = LI(ctx->opcode);
    li = (li ^ 0x02000000) - 0x02000000;
    /* AA=0: target is CIA-relative; AA=1: target is absolute. */
    if (likely(AA(ctx->opcode) == 0)) {
        target = ctx->cia + li;
    } else {
        target = li;
    }
    /* LK=1: save the return address in LR. */
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    gen_update_cfar(ctx, ctx->cia);
    gen_goto_tb(ctx, 0, target);
    ctx->base.is_jmp = DISAS_NORETURN;
}
4310 
/* Branch-target kinds handled by gen_bcond(). */
#define BCOND_IM  0
#define BCOND_LR  1
#define BCOND_CTR 2
#define BCOND_TAR 3
4315 
/*
 * Conditional-branch expansion shared by bc (immediate target), bclr,
 * bcctr and bctar.  BO controls CTR decrement/testing and CR testing;
 * LK selects linkage.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Snapshot the target register now: LK may overwrite LR below. */
        target = tcg_temp_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                return;
            }

            /* "Test and decrement": test CTR *before* decrementing it. */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal form: decrement CTR, then test the new value. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Branch targets are word-aligned: clear the two low bits. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4424 
/* bc - conditional branch with immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}

/* bcctr - conditional branch to CTR */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}

/* bclr - conditional branch to LR */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}

/* bctar - conditional branch to TAR */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4444 
4445 /***                      Condition register logical                       ***/
/*
 * Expand one CR-bit logical instruction: shift crbA's and crbB's 4-bit
 * CR fields so both source bits line up with crbD's position within its
 * field, apply tcg_op, then merge the single result bit back into
 * crbD's CR field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
}
4474 
4475 /* crand */
4476 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
4477 /* crandc */
4478 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
4479 /* creqv */
4480 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
4481 /* crnand */
4482 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
4483 /* crnor */
4484 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
4485 /* cror */
4486 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
4487 /* crorc */
4488 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
4489 /* crxor */
4490 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4491 
4492 /* mcrf */
static void gen_mcrf(DisasContext *ctx)
{
    /* Copy CR field crfS verbatim into CR field crfD. */
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4497 
4498 /***                           System linkage                              ***/
4499 
4500 /* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    /* Record this rfi's address in CFAR before redirecting. */
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(tcg_env);
    /* MSR/NIP changed under our feet: leave the TB and re-evaluate. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4522 
4523 #if defined(TARGET_PPC64)
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    translator_io_start(&ctx->base);
    /* Record this rfid's address in CFAR before redirecting. */
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(tcg_env);
    /* MSR/NIP changed under our feet: leave the TB and re-evaluate. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4537 
4538 #if !defined(CONFIG_USER_ONLY)
4539 static void gen_rfscv(DisasContext *ctx)
4540 {
4541 #if defined(CONFIG_USER_ONLY)
4542     GEN_PRIV(ctx);
4543 #else
4544     /* Restore CPU state */
4545     CHK_SV(ctx);
4546     translator_io_start(&ctx->base);
4547     gen_update_cfar(ctx, ctx->cia);
4548     gen_helper_rfscv(tcg_env);
4549     ctx->base.is_jmp = DISAS_EXIT;
4550 #endif
4551 }
4552 #endif
4553 
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    /* hrfid is hypervisor-privileged, unlike rfi/rfid (CHK_SV). */
    CHK_HV(ctx);
    translator_io_start(&ctx->base);
    gen_helper_hrfid(tcg_env);
    /* MSR/NIP changed under our feet: leave the TB and re-evaluate. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4566 #endif
4567 
4568 /* sc */
4569 #if defined(CONFIG_USER_ONLY)
4570 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
4571 #else
4572 #define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
4573 #endif
/* Raise the syscall exception; POWERPC_SYSCALL differs for user-only builds. */
static void gen_sc(DisasContext *ctx)
{
    uint32_t lev;

    /*
     * LEV is a 7-bit field, but the top 6 bits are treated as a reserved
     * field (i.e., ignored). ISA v3.1 changes that to 5 bits, but that is
     * for Ultravisor which TCG does not support, so just ignore the top 6.
     */
    lev = (ctx->opcode >> 5) & 0x1;
    gen_exception_err(ctx, POWERPC_SYSCALL, lev);
}
4586 
4587 #if defined(TARGET_PPC64)
4588 #if !defined(CONFIG_USER_ONLY)
static void gen_scv(DisasContext *ctx)
{
    /* scv passes the full 7-bit LEV field to the helper. */
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(tcg_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
4599 #endif
4600 #endif
4601 
4602 /***                                Trap                                   ***/
4603 
4604 /* Check for unconditional traps (always or never) */
4605 static bool check_unconditional_trap(DisasContext *ctx)
4606 {
4607     /* Trap never */
4608     if (TO(ctx->opcode) == 0) {
4609         return true;
4610     }
4611     /* Trap always */
4612     if (TO(ctx->opcode) == 31) {
4613         gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4614         return true;
4615     }
4616     return false;
4617 }
4618 
4619 /* tw */
4620 static void gen_tw(DisasContext *ctx)
4621 {
4622     TCGv_i32 t0;
4623 
4624     if (check_unconditional_trap(ctx)) {
4625         return;
4626     }
4627     t0 = tcg_constant_i32(TO(ctx->opcode));
4628     gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4629                   t0);
4630 }
4631 
4632 /* twi */
4633 static void gen_twi(DisasContext *ctx)
4634 {
4635     TCGv t0;
4636     TCGv_i32 t1;
4637 
4638     if (check_unconditional_trap(ctx)) {
4639         return;
4640     }
4641     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4642     t1 = tcg_constant_i32(TO(ctx->opcode));
4643     gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4644 }
4645 
4646 #if defined(TARGET_PPC64)
4647 /* td */
4648 static void gen_td(DisasContext *ctx)
4649 {
4650     TCGv_i32 t0;
4651 
4652     if (check_unconditional_trap(ctx)) {
4653         return;
4654     }
4655     t0 = tcg_constant_i32(TO(ctx->opcode));
4656     gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4657                   t0);
4658 }
4659 
4660 /* tdi */
4661 static void gen_tdi(DisasContext *ctx)
4662 {
4663     TCGv t0;
4664     TCGv_i32 t1;
4665 
4666     if (check_unconditional_trap(ctx)) {
4667         return;
4668     }
4669     t0 = tcg_constant_tl(SIMM(ctx->opcode));
4670     t1 = tcg_constant_i32(TO(ctx->opcode));
4671     gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4672 }
4673 #endif
4674 
4675 /***                          Processor control                            ***/
4676 
4677 /* mcrxr */
/* mcrxr: copy XER[SO, OV, CA] into CR field crfD, then clear them in XER. */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* Assemble SO into bit 3, OV into bit 2, CA into bit 1 of dst. */
    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);

    /* The copied XER bits are cleared as a side effect. */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4697 
4698 #ifdef TARGET_PPC64
4699 /* mcrxrx */
/* mcrxrx: copy XER[OV, OV32, CA, CA32] into CR field crfD (not cleared). */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
}
4716 #endif
4717 
4718 /* mfcr mfocrf */
4719 static void gen_mfcr(DisasContext *ctx)
4720 {
4721     uint32_t crm, crn;
4722 
4723     if (likely(ctx->opcode & 0x00100000)) {
4724         crm = CRM(ctx->opcode);
4725         if (likely(crm && ((crm & (crm - 1)) == 0))) {
4726             crn = ctz32(crm);
4727             tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4728             tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4729                             cpu_gpr[rD(ctx->opcode)], crn * 4);
4730         }
4731     } else {
4732         TCGv_i32 t0 = tcg_temp_new_i32();
4733         tcg_gen_mov_i32(t0, cpu_crf[0]);
4734         tcg_gen_shli_i32(t0, t0, 4);
4735         tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4736         tcg_gen_shli_i32(t0, t0, 4);
4737         tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4738         tcg_gen_shli_i32(t0, t0, 4);
4739         tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4740         tcg_gen_shli_i32(t0, t0, 4);
4741         tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4742         tcg_gen_shli_i32(t0, t0, 4);
4743         tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4744         tcg_gen_shli_i32(t0, t0, 4);
4745         tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4746         tcg_gen_shli_i32(t0, t0, 4);
4747         tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4748         tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4749     }
4750 }
4751 
4752 /* mfmsr */
/* Move MSR into rD; supervisor only (CHK_SV raises otherwise). */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4758 
4759 /* mfspr */
/*
 * Shared implementation of mfspr/mftb: look up the SPR read callback
 * for the current privilege level and invoke it, or raise/log the
 * appropriate error for privileged or undefined SPRs.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    /* Pick the callback matching the current privilege level. */
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4820 
static void gen_mfspr(DisasContext *ctx)
{
    /* mfspr: delegate to the shared SPR-read implementation. */
    gen_op_mfspr(ctx);
}
4825 
4826 /* mftb */
static void gen_mftb(DisasContext *ctx)
{
    /* mftb reads the time base through the same SPR callback table. */
    gen_op_mfspr(ctx);
}
4831 
4832 /* mtcrf mtocrf*/
4833 static void gen_mtcrf(DisasContext *ctx)
4834 {
4835     uint32_t crm, crn;
4836 
4837     crm = CRM(ctx->opcode);
4838     if (likely((ctx->opcode & 0x00100000))) {
4839         if (crm && ((crm & (crm - 1)) == 0)) {
4840             TCGv_i32 temp = tcg_temp_new_i32();
4841             crn = ctz32(crm);
4842             tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4843             tcg_gen_shri_i32(temp, temp, crn * 4);
4844             tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4845         }
4846     } else {
4847         TCGv_i32 temp = tcg_temp_new_i32();
4848         tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4849         for (crn = 0 ; crn < 8 ; crn++) {
4850             if (crm & (1 << crn)) {
4851                     tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4852                     tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4853             }
4854         }
4855     }
4856 }
4857 
4858 /* mtmsr */
4859 #if defined(TARGET_PPC64)
/*
 * mtmsrd: move rS into the MSR (64-bit, Book3S arch 2.x only).
 * Supervisor only; in user-only builds CHK_SV raises and the rest of
 * the body is compiled out.
 */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & writable bits) | (old MSR & preserved bits) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(tcg_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* !defined(CONFIG_USER_ONLY) */
}
4903 #endif /* defined(TARGET_PPC64) */
4904 
/*
 * mtmsr: move the low 32 bits of rS into the MSR.  Supervisor only;
 * in user-only builds CHK_SV raises and the rest is compiled out.
 */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* Only the low 32 MSR bits are writable by mtmsr. */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    translator_io_start(&ctx->base);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & writable bits) | (old MSR & preserved bits) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(tcg_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif
}
4942 
4943 /* mtspr */
/*
 * mtspr: look up the SPR write callback for the current privilege
 * level and invoke it, or raise/log the appropriate error for
 * privileged or undefined SPRs (mirrors gen_op_mfspr).
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    /* Pick the callback matching the current privilege level. */
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4999 
5000 #if defined(TARGET_PPC64)
5001 /* setb */
/* setb: set rD from CR field crfS. */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /*
     * The 4-bit CR field value decides the result:
     *   field >= 8 (top/LT bit set)    -> -1 (movcond selects tm1)
     *   field >= 4 (next/GT bit set)   ->  1 (the setcond result)
     *   otherwise                      ->  0
     */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
}
5013 #endif
5014 
5015 /***                         Cache management                              ***/
5016 
5017 /* dcbf */
5018 static void gen_dcbf(DisasContext *ctx)
5019 {
5020     /* XXX: specification says this is treated as a load by the MMU */
5021     TCGv t0;
5022     gen_set_access_type(ctx, ACCESS_CACHE);
5023     t0 = tcg_temp_new();
5024     gen_addr_reg_index(ctx, t0);
5025     gen_qemu_ld8u(ctx, t0, t0);
5026 }
5027 
5028 /* dcbfep (external PID dcbf) */
5029 static void gen_dcbfep(DisasContext *ctx)
5030 {
5031     /* XXX: specification says this is treated as a load by the MMU */
5032     TCGv t0;
5033     CHK_SV(ctx);
5034     gen_set_access_type(ctx, ACCESS_CACHE);
5035     t0 = tcg_temp_new();
5036     gen_addr_reg_index(ctx, t0);
5037     tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
5038 }
5039 
5040 /* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    /* Load then store back one byte so both access checks are exercised. */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5058 
/* dcbst */
5060 static void gen_dcbst(DisasContext *ctx)
5061 {
5062     /* XXX: specification say this is treated as a load by the MMU */
5063     TCGv t0;
5064     gen_set_access_type(ctx, ACCESS_CACHE);
5065     t0 = tcg_temp_new();
5066     gen_addr_reg_index(ctx, t0);
5067     gen_qemu_ld8u(ctx, t0, t0);
5068 }
5069 
5070 /* dcbstep (dcbstep External PID version) */
5071 static void gen_dcbstep(DisasContext *ctx)
5072 {
5073     /* XXX: specification say this is treated as a load by the MMU */
5074     TCGv t0;
5075     gen_set_access_type(ctx, ACCESS_CACHE);
5076     t0 = tcg_temp_new();
5077     gen_addr_reg_index(ctx, t0);
5078     tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
5079 }
5080 
5081 /* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5090 
5091 /* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5100 
5101 /* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5110 
5111 /* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5120 
5121 /* dcbtls */
5122 static void gen_dcbtls(DisasContext *ctx)
5123 {
5124     /* Always fails locking the cache */
5125     TCGv t0 = tcg_temp_new();
5126     gen_load_spr(t0, SPR_Exxx_L1CSR0);
5127     tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
5128     gen_store_spr(SPR_Exxx_L1CSR0, t0);
5129 }
5130 
5131 /* dcblc */
static void gen_dcblc(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * (gen_dcbtls never actually locks a cache line, so there is
     * nothing to unlock here)
     */
}
5138 
5139 /* dcbz */
5140 static void gen_dcbz(DisasContext *ctx)
5141 {
5142     TCGv tcgv_addr;
5143     TCGv_i32 tcgv_op;
5144 
5145     gen_set_access_type(ctx, ACCESS_CACHE);
5146     tcgv_addr = tcg_temp_new();
5147     tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
5148     gen_addr_reg_index(ctx, tcgv_addr);
5149     gen_helper_dcbz(tcg_env, tcgv_addr, tcgv_op);
5150 }
5151 
5152 /* dcbzep */
5153 static void gen_dcbzep(DisasContext *ctx)
5154 {
5155     TCGv tcgv_addr;
5156     TCGv_i32 tcgv_op;
5157 
5158     gen_set_access_type(ctx, ACCESS_CACHE);
5159     tcgv_addr = tcg_temp_new();
5160     tcgv_op = tcg_constant_i32(ctx->opcode & 0x03FF000);
5161     gen_addr_reg_index(ctx, tcgv_addr);
5162     gen_helper_dcbzep(tcg_env, tcgv_addr, tcgv_op);
5163 }
5164 
5165 /* dst / dstt */
5166 static void gen_dst(DisasContext *ctx)
5167 {
5168     if (rA(ctx->opcode) == 0) {
5169         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5170     } else {
5171         /* interpreted as no-op */
5172     }
5173 }
5174 
5175 /* dstst /dststt */
5176 static void gen_dstst(DisasContext *ctx)
5177 {
5178     if (rA(ctx->opcode) == 0) {
5179         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5180     } else {
5181         /* interpreted as no-op */
5182     }
5183 
5184 }
5185 
5186 /* dss / dssall */
static void gen_dss(DisasContext *ctx)
{
    /* dss / dssall (data stream stop): interpreted as no-op */
}
5191 
5192 /* icbi */
5193 static void gen_icbi(DisasContext *ctx)
5194 {
5195     TCGv t0;
5196     gen_set_access_type(ctx, ACCESS_CACHE);
5197     t0 = tcg_temp_new();
5198     gen_addr_reg_index(ctx, t0);
5199     gen_helper_icbi(tcg_env, t0);
5200 }
5201 
5202 /* icbiep */
5203 static void gen_icbiep(DisasContext *ctx)
5204 {
5205     TCGv t0;
5206     gen_set_access_type(ctx, ACCESS_CACHE);
5207     t0 = tcg_temp_new();
5208     gen_addr_reg_index(ctx, t0);
5209     gen_helper_icbiep(tcg_env, t0);
5210 }
5211 
5212 /* Optional: */
5213 /* dcba */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     *      but does not generate any exception
     */
}
5222 
5223 /***                    Segment register manipulation                      ***/
5224 /* Supervisor only: */
5225 
5226 /* mfsr */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* Segment register number comes directly from the opcode's SR field. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5239 
5240 /* mfsrin */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number: 4 bits extracted from rB at bit offset 28. */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5254 
5255 /* mtsr */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* Segment register number comes directly from the opcode's SR field. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5268 
5269 /* mtsrin */
5270 static void gen_mtsrin(DisasContext *ctx)
5271 {
5272 #if defined(CONFIG_USER_ONLY)
5273     GEN_PRIV(ctx);
5274 #else
5275     TCGv t0;
5276     CHK_SV(ctx);
5277 
5278     t0 = tcg_temp_new();
5279     tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
5280     gen_helper_store_sr(tcg_env, t0, cpu_gpr[rD(ctx->opcode)]);
5281 #endif /* defined(CONFIG_USER_ONLY) */
5282 }
5283 
5284 #if defined(TARGET_PPC64)
5285 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5286 
5287 /* mfsr */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* 64-bit bridge variant: same SR read path as the 32-bit version. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5300 
5301 /* mfsrin */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number: 4 bits extracted from rB at bit offset 28. */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5315 
5316 /* mtsr */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* 64-bit bridge variant: same SR write path as the 32-bit version. */
    t0 = tcg_constant_tl(SR(ctx->opcode));
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5329 
5330 /* mtsrin */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number: 4 bits extracted from rB at bit offset 28. */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(tcg_env, t0, cpu_gpr[rS(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5344 
5345 #endif /* defined(TARGET_PPC64) */
5346 
5347 /***                      Lookaside buffer management                      ***/
5348 /* Optional & supervisor only: */
5349 
5350 /* tlbia */
/* tlbia: invalidate all TLB entries; hypervisor privileged here (CHK_HV). */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(tcg_env);
#endif  /* defined(CONFIG_USER_ONLY) */
}
5361 
5362 /* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /*
     * Only BookE needs an explicit flush check here; on BookS the
     * ptesync path already covers it, making tlbsync a no-op for
     * server CPUs.
     */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5381 
5382 /***                              External control                         ***/
5383 /* Optional: */
5384 
5385 /* eciwx */
5386 static void gen_eciwx(DisasContext *ctx)
5387 {
5388     TCGv t0;
5389     /* Should check EAR[E] ! */
5390     gen_set_access_type(ctx, ACCESS_EXT);
5391     t0 = tcg_temp_new();
5392     gen_addr_reg_index(ctx, t0);
5393     tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5394                        DEF_MEMOP(MO_UL | MO_ALIGN));
5395 }
5396 
5397 /* ecowx */
5398 static void gen_ecowx(DisasContext *ctx)
5399 {
5400     TCGv t0;
5401     /* Should check EAR[E] ! */
5402     gen_set_access_type(ctx, ACCESS_EXT);
5403     t0 = tcg_temp_new();
5404     gen_addr_reg_index(ctx, t0);
5405     tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5406                        DEF_MEMOP(MO_UL | MO_ALIGN));
5407 }
5408 
5409 /* 602 - 603 - G2 TLB management */
5410 
5411 /* tlbld */
/* tlbld: load a data TLB entry on 6xx-family CPUs (supervisor only). */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5421 
5422 /* tlbli */
/* tlbli: load an instruction TLB entry on 6xx-family CPUs (supervisor only). */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5432 
5433 /* BookE specific instructions */
5434 
5435 /* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO - mfapidi is unimplemented; raise an invalid-op exception. */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5441 
5442 /* XXX: not implemented on 440 ? */
static void gen_tlbiva(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /*
     * NOTE(review): t0 holds the computed effective address but is never
     * used -- the helper receives raw rB instead.  Either the helper
     * should take t0, or the EA computation above is dead code; confirm
     * against helper_tlbiva's expected argument.
     */
    gen_helper_tlbiva(tcg_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5456 
5457 /* All 405 MAC instructions are translated here */
5458 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5459                                         int ra, int rb, int rt, int Rc)
5460 {
5461     TCGv t0, t1;
5462 
5463     t0 = tcg_temp_new();
5464     t1 = tcg_temp_new();
5465 
5466     switch (opc3 & 0x0D) {
5467     case 0x05:
5468         /* macchw    - macchw.    - macchwo   - macchwo.   */
5469         /* macchws   - macchws.   - macchwso  - macchwso.  */
5470         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
5471         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
5472         /* mulchw - mulchw. */
5473         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5474         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5475         tcg_gen_ext16s_tl(t1, t1);
5476         break;
5477     case 0x04:
5478         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
5479         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
5480         /* mulchwu - mulchwu. */
5481         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5482         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5483         tcg_gen_ext16u_tl(t1, t1);
5484         break;
5485     case 0x01:
5486         /* machhw    - machhw.    - machhwo   - machhwo.   */
5487         /* machhws   - machhws.   - machhwso  - machhwso.  */
5488         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
5489         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
5490         /* mulhhw - mulhhw. */
5491         tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5492         tcg_gen_ext16s_tl(t0, t0);
5493         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5494         tcg_gen_ext16s_tl(t1, t1);
5495         break;
5496     case 0x00:
5497         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
5498         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
5499         /* mulhhwu - mulhhwu. */
5500         tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5501         tcg_gen_ext16u_tl(t0, t0);
5502         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5503         tcg_gen_ext16u_tl(t1, t1);
5504         break;
5505     case 0x0D:
5506         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
5507         /* maclhws   - maclhws.   - maclhwso  - maclhwso.  */
5508         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
5509         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
5510         /* mullhw - mullhw. */
5511         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5512         tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5513         break;
5514     case 0x0C:
5515         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
5516         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
5517         /* mullhwu - mullhwu. */
5518         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5519         tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5520         break;
5521     }
5522     if (opc2 & 0x04) {
5523         /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5524         tcg_gen_mul_tl(t1, t0, t1);
5525         if (opc2 & 0x02) {
5526             /* nmultiply-and-accumulate (0x0E) */
5527             tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5528         } else {
5529             /* multiply-and-accumulate (0x0C) */
5530             tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5531         }
5532 
5533         if (opc3 & 0x12) {
5534             /* Check overflow and/or saturate */
5535             TCGLabel *l1 = gen_new_label();
5536 
5537             if (opc3 & 0x10) {
5538                 /* Start with XER OV disabled, the most likely case */
5539                 tcg_gen_movi_tl(cpu_ov, 0);
5540             }
5541             if (opc3 & 0x01) {
5542                 /* Signed */
5543                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5544                 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5545                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5546                 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5547                 if (opc3 & 0x02) {
5548                     /* Saturate */
5549                     tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5550                     tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5551                 }
5552             } else {
5553                 /* Unsigned */
5554                 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5555                 if (opc3 & 0x02) {
5556                     /* Saturate */
5557                     tcg_gen_movi_tl(t0, UINT32_MAX);
5558                 }
5559             }
5560             if (opc3 & 0x10) {
5561                 /* Check overflow */
5562                 tcg_gen_movi_tl(cpu_ov, 1);
5563                 tcg_gen_movi_tl(cpu_so, 1);
5564             }
5565             gen_set_label(l1);
5566             tcg_gen_mov_tl(cpu_gpr[rt], t0);
5567         }
5568     } else {
5569         tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5570     }
5571     if (unlikely(Rc) != 0) {
5572         /* Update Rc0 */
5573         gen_set_Rc0(ctx, cpu_gpr[rt]);
5574     }
5575 }
5576 
/*
 * Emit a translator entry point for one 405 MAC/multiply variant by
 * forwarding the opcode's register fields to gen_405_mulladd_insn.
 */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}
5583 
/*
 * Instantiate every 405 MAC/multiply variant.  The opc2/opc3 pairs encode
 * the operand halves, signedness, saturation and overflow behaviour
 * decoded by gen_405_mulladd_insn above.
 */
/* macchw    - macchw.    */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo   - macchwo.   */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws   - macchws.   */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso  - macchwso.  */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu  - macchwsu.  */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu   - macchwu.   */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo  - macchwuo.  */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw    - machhw.    */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo   - machhwo.   */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws   - machhws.   */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso  - machhwso.  */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu  - machhwsu.  */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu   - machhwu.   */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo  - machhwuo.  */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw    - maclhw.    */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo   - maclhwo.   */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws   - maclhws.   */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso  - maclhwso.  */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu   - maclhwu.   */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo  - maclhwuo.  */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu  - maclhwsu.  */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw   - nmacchw.   */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo  - nmacchwo.  */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws  - nmacchws.  */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw   - nmachhw.   */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo  - nmachhwo.  */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws  - nmachhws.  */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw   - nmaclhw.   */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo  - nmaclhwo.  */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws  - nmaclhws.  */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw  - mulchw.  */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw  - mulhhw.  */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw  - mullhw.  */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5669 
5670 /* mfdcr */
5671 static void gen_mfdcr(DisasContext *ctx)
5672 {
5673 #if defined(CONFIG_USER_ONLY)
5674     GEN_PRIV(ctx);
5675 #else
5676     TCGv dcrn;
5677 
5678     CHK_SV(ctx);
5679     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5680     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env, dcrn);
5681 #endif /* defined(CONFIG_USER_ONLY) */
5682 }
5683 
5684 /* mtdcr */
5685 static void gen_mtdcr(DisasContext *ctx)
5686 {
5687 #if defined(CONFIG_USER_ONLY)
5688     GEN_PRIV(ctx);
5689 #else
5690     TCGv dcrn;
5691 
5692     CHK_SV(ctx);
5693     dcrn = tcg_constant_tl(SPR(ctx->opcode));
5694     gen_helper_store_dcr(tcg_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
5695 #endif /* defined(CONFIG_USER_ONLY) */
5696 }
5697 
/* mfdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number comes from rA at run time. */
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], tcg_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5711 
/* mtdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Indexed form: the DCR number comes from rA at run time. */
    gen_helper_store_dcr(tcg_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5725 
/* dccci */
/* Data cache invalidate: privileged; no cache is modeled, so only the
 * supervisor check is emitted. */
static void gen_dccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5732 
/* dcread */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* The load is emitted for its MMU side effects (faults/access type). */
    gen_qemu_ld32u(ctx, val, EA);
    /*
     * NOTE(review): rD receives the EA, not the loaded word (val is
     * discarded).  Presumably an approximation of dcread's cache-debug
     * result -- confirm against the 4xx cache model before "fixing".
     */
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5750 
/* icbt */
/* Instruction cache block touch (40x): a hint only, nothing to emit. */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5760 
/* iccci */
/* Instruction cache invalidate: privileged; no cache is modeled. */
static void gen_iccci(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5767 
/* icread */
/* Instruction cache read: privileged; no cache is modeled. */
static void gen_icread(DisasContext *ctx)
{
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5774 
/* rfci (supervisor only) */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(tcg_env);
    /* Machine state changed (return from critical interrupt): leave the TB. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5787 
/* rfci: return from critical interrupt (BookE). */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(tcg_env);
    /* Machine state changed: leave the TB. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5799 
5800 /* BookE specific */
5801 
5802 /* XXX: not implemented on 440 ? */
/* rfdi: return from debug interrupt. */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(tcg_env);
    /* Machine state changed: leave the TB. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5814 
5815 /* XXX: not implemented on 440 ? */
/* rfmci: return from machine-check interrupt. */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(tcg_env);
    /* Machine state changed: leave the TB. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5827 
5828 /* TLB management - PowerPC 405 implementation */
5829 
/* tlbre */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* The rB field selects which word of the entry at index rA to read. */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        /* Any other word selector is an invalid form. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5852 
5853 /* tlbsx - tlbsx. */
5854 static void gen_tlbsx_40x(DisasContext *ctx)
5855 {
5856 #if defined(CONFIG_USER_ONLY)
5857     GEN_PRIV(ctx);
5858 #else
5859     TCGv t0;
5860 
5861     CHK_SV(ctx);
5862     t0 = tcg_temp_new();
5863     gen_addr_reg_index(ctx, t0);
5864     gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
5865     if (Rc(ctx->opcode)) {
5866         TCGLabel *l1 = gen_new_label();
5867         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5868         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5869         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5870         gen_set_label(l1);
5871     }
5872 #endif /* defined(CONFIG_USER_ONLY) */
5873 }
5874 
/* tlbwe */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* The rB field selects which word of the entry at index rA to write. */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(tcg_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        /* Any other word selector is an invalid form. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5898 
5899 /* TLB management - PowerPC 440 implementation */
5900 
/* tlbre */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* rB (0..2) selects which word of the 440 TLB entry at index rA to read. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], tcg_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
        }
        break;
    default:
        /* Word selectors above 2 are invalid forms. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5925 
5926 /* tlbsx - tlbsx. */
5927 static void gen_tlbsx_440(DisasContext *ctx)
5928 {
5929 #if defined(CONFIG_USER_ONLY)
5930     GEN_PRIV(ctx);
5931 #else
5932     TCGv t0;
5933 
5934     CHK_SV(ctx);
5935     t0 = tcg_temp_new();
5936     gen_addr_reg_index(ctx, t0);
5937     gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], tcg_env, t0);
5938     if (Rc(ctx->opcode)) {
5939         TCGLabel *l1 = gen_new_label();
5940         tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
5941         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
5942         tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
5943         gen_set_label(l1);
5944     }
5945 #endif /* defined(CONFIG_USER_ONLY) */
5946 }
5947 
/* tlbwe */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* rB (0..2) selects which word of the 440 TLB entry at index rA to write. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_constant_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(tcg_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
        }
        break;
    default:
        /* Word selectors above 2 are invalid forms. */
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5971 
5972 /* TLB management - PowerPC BookE 2.06 implementation */
5973 
5974 /* tlbre */
5975 static void gen_tlbre_booke206(DisasContext *ctx)
5976 {
5977  #if defined(CONFIG_USER_ONLY)
5978     GEN_PRIV(ctx);
5979 #else
5980    CHK_SV(ctx);
5981     gen_helper_booke206_tlbre(tcg_env);
5982 #endif /* defined(CONFIG_USER_ONLY) */
5983 }
5984 
5985 /* tlbsx - tlbsx. */
5986 static void gen_tlbsx_booke206(DisasContext *ctx)
5987 {
5988 #if defined(CONFIG_USER_ONLY)
5989     GEN_PRIV(ctx);
5990 #else
5991     TCGv t0;
5992 
5993     CHK_SV(ctx);
5994     if (rA(ctx->opcode)) {
5995         t0 = tcg_temp_new();
5996         tcg_gen_add_tl(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
5997     } else {
5998         t0 = cpu_gpr[rB(ctx->opcode)];
5999     }
6000     gen_helper_booke206_tlbsx(tcg_env, t0);
6001 #endif /* defined(CONFIG_USER_ONLY) */
6002 }
6003 
/* tlbwe */
/* BookE 2.06 TLB write: all operands come from CPU state, none from GPRs. */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(tcg_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
6014 
6015 static void gen_tlbivax_booke206(DisasContext *ctx)
6016 {
6017 #if defined(CONFIG_USER_ONLY)
6018     GEN_PRIV(ctx);
6019 #else
6020     TCGv t0;
6021 
6022     CHK_SV(ctx);
6023     t0 = tcg_temp_new();
6024     gen_addr_reg_index(ctx, t0);
6025     gen_helper_booke206_tlbivax(tcg_env, t0);
6026 #endif /* defined(CONFIG_USER_ONLY) */
6027 }
6028 
/* tlbilx: BookE 2.06 local TLB invalidate; two opcode bits pick the scope. */
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    /* Opcode bits 21..22 select the invalidation variant; 2 is reserved. */
    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(tcg_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(tcg_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(tcg_env, t0);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6056 
/* wrtee */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Copy only the EE bit from rD into MSR, leaving all other bits alone. */
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    /* Enabling EE may unmask a pending interrupt. */
    gen_ppc_maybe_interrupt(ctx);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}
6078 
/* wrteei */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Opcode bit 0x8000 is the immediate EE value. */
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        /* Enabling EE may unmask a pending interrupt. */
        gen_ppc_maybe_interrupt(ctx);
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        /* Clearing EE cannot unmask anything; translation may continue. */
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6096 
6097 /* PowerPC 440 specific instructions */
6098 
6099 /* dlmzb */
6100 static void gen_dlmzb(DisasContext *ctx)
6101 {
6102     TCGv_i32 t0 = tcg_constant_i32(Rc(ctx->opcode));
6103     gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], tcg_env,
6104                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
6105 }
6106 
/* mbar replaces eieio on 440 */
/* Memory barrier: nothing to emit here. */
static void gen_mbar(DisasContext *ctx)
{
    /* interpreted as no-op */
}
6112 
/* msync replaces sync on 440 */
/* Memory synchronize: only validates reserved bits on BookE 2.06 cores. */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}
6123 
/* icbt */
/* Instruction cache block touch (440): a hint only, nothing to emit. */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
6133 
6134 #if defined(TARGET_PPC64)
6135 static void gen_maddld(DisasContext *ctx)
6136 {
6137     TCGv_i64 t1 = tcg_temp_new_i64();
6138 
6139     tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
6140     tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
6141 }
6142 
/* maddhd maddhdu */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* The Rc bit of the encoding distinguishes the unsigned form. */
    if (Rc(ctx->opcode)) {
        /* maddhdu: unsigned 128-bit product; addend rC is zero-extended. */
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        /* maddhd: signed product; t1 is the sign extension of rC. */
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /* 128-bit add of hi:lo + t1:rC; rD receives the high doubleword. */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
}
6162 #endif /* defined(TARGET_PPC64) */
6163 
/* tbegin: fault if TM is unavailable, otherwise defer to the helper
 * (QEMU's tbegin always fails the transaction -- see GEN_TM_NOOP below). */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(tcg_env);
}
6172 
/*
 * Unprivileged TM instructions whose only architectural effect, given
 * that tbegin always fails in QEMU, is to clear CR0.
 */
#define GEN_TM_NOOP(name)                                      \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails in QEMU, these user         \
     * space instructions all have a simple implementation:    \
     *                                                         \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                       \
     *           = 0b0 || 0b00    || 0b0                       \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6197 
/* cp_abort: no copy-paste facility state is modeled, so nothing to undo. */
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}
6202 
/* copy/paste are rejected outright until the facility is implemented. */
#define GEN_CP_PASTE_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)          \
{                                                         \
    /*                                                    \
     * Generate invalid exception until we have an        \
     * implementation of the copy paste facility          \
     */                                                   \
    gen_invalid(ctx);                                     \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6215 
/* tcheck: report the transaction state into the targeted CR field. */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6231 
/*
 * Privileged TM instructions: user-only builds always raise a privilege
 * exception; system builds check supervisor state and TM availability,
 * then clear CR0 (tbegin always fails, so there is nothing to do).
 */
#if defined(CONFIG_USER_ONLY)
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    gen_priv_opc(ctx);                                         \
}

#else

#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    CHK_SV(ctx);                                               \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails, the implementation is      \
     * simple:                                                 \
     *                                                         \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                         \
     *         = 0b0 || 0b00 | 0b0                             \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6263 
/* Read FPR regno (doubleword 0 of the corresponding VSR) into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, tcg_env, fpr_offset(regno));
}
6268 
/* Write src to FPR regno and zero doubleword 1 of the backing VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, tcg_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), tcg_env, vsr64_offset(regno, false));
}
6281 
/* Read one doubleword (high or low half) of Altivec register regno. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, tcg_env, avr64_offset(regno, high));
}
6286 
/* Write one doubleword (high or low half) of Altivec register regno. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, tcg_env, avr64_offset(regno, high));
}
6291 
6292 /*
6293  * Helpers for decodetree used by !function for decoding arguments.
6294  */
6295 static int times_2(DisasContext *ctx, int x)
6296 {
6297     return x * 2;
6298 }
6299 
6300 static int times_4(DisasContext *ctx, int x)
6301 {
6302     return x * 4;
6303 }
6304 
6305 static int times_16(DisasContext *ctx, int x)
6306 {
6307     return x * 16;
6308 }
6309 
static int64_t dw_compose_ea(DisasContext *ctx, int x)
{
    /* Insert the 6-bit field x at bits 3..8 of the sign-extended base
     * 0xfffffffffffffe00 (-512).  NOTE(review): the consuming decodetree
     * pattern is not visible here -- verify against its !function use. */
    return deposit64(0xfffffffffffffe00, 3, 6, x);
}
6314 
6315 /*
6316  * Helpers for trans_* functions to check for specific insns flags.
6317  * Use token pasting to ensure that we use the proper flag with the
6318  * proper variable.
6319  */
/* Reject the insn (decode returns false) when the flag is not present. */
#define REQUIRE_INSNS_FLAGS(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
            return false;                               \
        }                                               \
    } while (0)

#define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
    do {                                                \
        if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
            return false;                               \
        }                                               \
    } while (0)

/* Then special-case the check for 64-bit so that we elide code for ppc32. */
#if TARGET_LONG_BITS == 32
# define REQUIRE_64BIT(CTX)  return false
#else
# define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
#endif

/*
 * Facility checks: raise the matching unavailable exception and report
 * the insn as handled (return true) when the facility is disabled.
 */
#define REQUIRE_VECTOR(CTX)                             \
    do {                                                \
        if (unlikely(!(CTX)->altivec_enabled)) {        \
            gen_exception((CTX), POWERPC_EXCP_VPU);     \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_VSX(CTX)                                \
    do {                                                \
        if (unlikely(!(CTX)->vsx_enabled)) {            \
            gen_exception((CTX), POWERPC_EXCP_VSXU);    \
            return true;                                \
        }                                               \
    } while (0)

#define REQUIRE_FPU(ctx)                                \
    do {                                                \
        if (unlikely(!(ctx)->fpu_enabled)) {            \
            gen_exception((ctx), POWERPC_EXCP_FPU);     \
            return true;                                \
        }                                               \
    } while (0)
6364 
/*
 * Privilege checks for decodetree handlers: emit a privileged-op
 * exception and report the insn handled when the requirement fails.
 * User-only builds fail them unconditionally.
 */
#if !defined(CONFIG_USER_ONLY)
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
6385 
6386 /*
6387  * Helpers for implementing sets of trans_* functions.
6388  * Defer the implementation of NAME to FUNC, with optional extra arguments.
6389  */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { return FUNC(ctx, a, __VA_ARGS__); }
/* As TRANS, but first require an insns_flags / insns_flags2 bit. */
#define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
#define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }

/* 64-bit-only variants: reject (or elide) the insn on 32-bit targets. */
#define TRANS64(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
#define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
    {                                                          \
        REQUIRE_64BIT(ctx);                                    \
        REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
        return FUNC(ctx, a, __VA_ARGS__);                      \
    }
6416 
6417 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6418 
6419 
6420 #include "decode-insn32.c.inc"
6421 #include "decode-insn64.c.inc"
6422 #include "power8-pmu-regs.c.inc"
6423 
6424 /*
6425  * Incorporate CIA into the constant when R=1.
6426  * Validate that when R=1, RA=0.
6427  */
6428 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
6429 {
6430     d->rt = a->rt;
6431     d->ra = a->ra;
6432     d->si = a->si;
6433     if (a->r) {
6434         if (unlikely(a->ra != 0)) {
6435             gen_invalid(ctx);
6436             return false;
6437         }
6438         d->si += ctx->cia;
6439     }
6440     return true;
6441 }
6442 
6443 #include "translate/fixedpoint-impl.c.inc"
6444 
6445 #include "translate/fp-impl.c.inc"
6446 
6447 #include "translate/vmx-impl.c.inc"
6448 
6449 #include "translate/vsx-impl.c.inc"
6450 
6451 #include "translate/dfp-impl.c.inc"
6452 
6453 #include "translate/spe-impl.c.inc"
6454 
6455 #include "translate/branch-impl.c.inc"
6456 
6457 #include "translate/processor-ctrl-impl.c.inc"
6458 
6459 #include "translate/storage-ctrl-impl.c.inc"
6460 
6461 /* Handles lfdp */
6462 static void gen_dform39(DisasContext *ctx)
6463 {
6464     if ((ctx->opcode & 0x3) == 0) {
6465         if (ctx->insns_flags2 & PPC2_ISA205) {
6466             return gen_lfdp(ctx);
6467         }
6468     }
6469     return gen_invalid(ctx);
6470 }
6471 
6472 /* Handles stfdp */
6473 static void gen_dform3D(DisasContext *ctx)
6474 {
6475     if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6476         /* stfdp */
6477         if (ctx->insns_flags2 & PPC2_ISA205) {
6478             return gen_stfdp(ctx);
6479         }
6480     }
6481     return gen_invalid(ctx);
6482 }
6483 
6484 #if defined(TARGET_PPC64)
/* brd: byte-reverse the doubleword in rS into rA (ISA v3.1) */
static void gen_brd(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
6490 
/*
 * brw: byte-reverse each 32-bit word of rS into rA.
 * bswap64 reverses all eight bytes, which also swaps the two word halves;
 * the rotate by 32 swaps the halves back, leaving each word byte-reversed
 * in place.
 */
static void gen_brw(DisasContext *ctx)
{
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
    tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);

}
6498 
6499 /* brh */
6500 static void gen_brh(DisasContext *ctx)
6501 {
6502     TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
6503     TCGv_i64 t1 = tcg_temp_new_i64();
6504     TCGv_i64 t2 = tcg_temp_new_i64();
6505 
6506     tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
6507     tcg_gen_and_i64(t2, t1, mask);
6508     tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
6509     tcg_gen_shli_i64(t1, t1, 8);
6510     tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);
6511 }
6512 #endif
6513 
static opcode_t opcodes[] = {
/*
 * Table of opcode handlers registered into the dispatch tables below.
 * Each entry is built by a GEN_HANDLER* macro from the mnemonic, the
 * opc1/opc2/opc3 decode fields, a 32-bit mask, and the PPC_* (and, for
 * the *_E variants, PPC2_*) feature flags that gate the instruction's
 * availability on the current CPU model.
 */
#if defined(TARGET_PPC64)
GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
#endif
GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
#if defined(TARGET_PPC64)
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
               PPC_NONE, PPC2_ISA300),
#endif
/* handles lfdp, lxsd, lxssp */
GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
/* handles stfdp, stxsd, stxssp */
GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
/* Load/store multiple and string instructions */
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
#endif
GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
/* ISA v3.0 changed the extended opcode from 62 to 30 */
GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
#if !defined(CONFIG_USER_ONLY)
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
#endif
/* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
#if defined(TARGET_PPC64)
GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
#endif
GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
#if defined(TARGET_PPC64)
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER_E(dcblc, 0x1F, 0x06, 0x0c, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
#if defined(TARGET_PPC64)
GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
             PPC_SEGMENT_64B),
GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
             PPC_SEGMENT_64B),
#endif
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
/*
 * XXX Those instructions will need to be handled differently for
 * different ISA versions
 */
GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
               PPC_NONE, PPC2_BOOKE206),
GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
              PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
               PPC_BOOKE, PPC2_BOOKE206),
GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
             PPC_440_SPEC),
GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
#if defined(TARGET_PPC64)
GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
              PPC2_ISA300),
GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
#endif

#undef GEN_INT_ARITH_ADD
#undef GEN_INT_ARITH_ADD_CONST
#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)

#undef GEN_INT_ARITH_DIVW
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#if defined(TARGET_PPC64)
#undef GEN_INT_ARITH_DIVD
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),

GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),

#undef GEN_INT_ARITH_MUL_HELPER
#define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
#endif

#undef GEN_INT_ARITH_SUBF
#undef GEN_INT_ARITH_SUBF_CONST
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)

#undef GEN_LOGICAL1
#undef GEN_LOGICAL2
#define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
#define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
#if defined(TARGET_PPC64)
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
#endif

#if defined(TARGET_PPC64)
#undef GEN_PPC64_R2
#undef GEN_PPC64_R4
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B)
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
             PPC_64B),                                                        \
GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
             PPC_64B)
GEN_PPC64_R4(rldicl, 0x1E, 0x00),
GEN_PPC64_R4(rldicr, 0x1E, 0x02),
GEN_PPC64_R4(rldic, 0x1E, 0x04),
GEN_PPC64_R2(rldcl, 0x1E, 0x08),
GEN_PPC64_R2(rldcr, 0x1E, 0x09),
GEN_PPC64_R4(rldimi, 0x1E, 0x06),
#endif

#undef GEN_LDX_E
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),

#if defined(TARGET_PPC64)
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)

/* HV/P7 and later only */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)

/* External PID based load */
#undef GEN_LDEPX
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#undef GEN_STX_E
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),

#if defined(TARGET_PPC64)
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)

#undef GEN_STEPX
#define GEN_STEPX(name, ldop, opc2, opc3)                                     \
GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
              0x00000001, PPC_NONE, PPC2_BOOKE206),

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif

#undef GEN_CRLOGIC
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),

#undef GEN_MAC_HANDLER
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),

/* Transactional memory (gated by PPC2_TM) */
GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tend,   "tend",   0x1F, 0x0E, 0x15, 0x01FFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
               PPC_NONE, PPC2_TM),
GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
               PPC_NONE, PPC2_TM),

#include "translate/fp-ops.c.inc"

#include "translate/vmx-ops.c.inc"

#include "translate/vsx-ops.c.inc"

#include "translate/spe-ops.c.inc"
};
7010 
7011 /*****************************************************************************/
7012 /* Opcode types */
7013 enum {
7014     PPC_DIRECT   = 0, /* Opcode routine        */
7015     PPC_INDIRECT = 1, /* Indirect opcode table */
7016 };
7017 
7018 #define PPC_OPCODE_MASK 0x3
7019 
7020 static inline int is_indirect_opcode(void *handler)
7021 {
7022     return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
7023 }
7024 
7025 static inline opc_handler_t **ind_table(void *handler)
7026 {
7027     return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
7028 }
7029 
7030 /* Instruction table creation */
7031 /* Opcodes tables creation */
7032 static void fill_new_table(opc_handler_t **table, int len)
7033 {
7034     int i;
7035 
7036     for (i = 0; i < len; i++) {
7037         table[i] = &invalid_handler;
7038     }
7039 }
7040 
7041 static int create_new_table(opc_handler_t **table, unsigned char idx)
7042 {
7043     opc_handler_t **tmp;
7044 
7045     tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
7046     fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
7047     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
7048 
7049     return 0;
7050 }
7051 
7052 static int insert_in_table(opc_handler_t **table, unsigned char idx,
7053                             opc_handler_t *handler)
7054 {
7055     if (table[idx] != &invalid_handler) {
7056         return -1;
7057     }
7058     table[idx] = handler;
7059 
7060     return 0;
7061 }
7062 
7063 static int register_direct_insn(opc_handler_t **ppc_opcodes,
7064                                 unsigned char idx, opc_handler_t *handler)
7065 {
7066     if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
7067         printf("*** ERROR: opcode %02x already assigned in main "
7068                "opcode table\n", idx);
7069         return -1;
7070     }
7071 
7072     return 0;
7073 }
7074 
7075 static int register_ind_in_table(opc_handler_t **table,
7076                                  unsigned char idx1, unsigned char idx2,
7077                                  opc_handler_t *handler)
7078 {
7079     if (table[idx1] == &invalid_handler) {
7080         if (create_new_table(table, idx1) < 0) {
7081             printf("*** ERROR: unable to create indirect table "
7082                    "idx=%02x\n", idx1);
7083             return -1;
7084         }
7085     } else {
7086         if (!is_indirect_opcode(table[idx1])) {
7087             printf("*** ERROR: idx %02x already assigned to a direct "
7088                    "opcode\n", idx1);
7089             return -1;
7090         }
7091     }
7092     if (handler != NULL &&
7093         insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
7094         printf("*** ERROR: opcode %02x already assigned in "
7095                "opcode table %02x\n", idx2, idx1);
7096         return -1;
7097     }
7098 
7099     return 0;
7100 }
7101 
/*
 * Register a two-level (opc1/opc2) instruction in the main table.
 * Thin wrapper over register_ind_in_table() applied at the top level.
 */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
7108 
7109 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
7110                                 unsigned char idx1, unsigned char idx2,
7111                                 unsigned char idx3, opc_handler_t *handler)
7112 {
7113     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7114         printf("*** ERROR: unable to join indirect table idx "
7115                "[%02x-%02x]\n", idx1, idx2);
7116         return -1;
7117     }
7118     if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7119                               handler) < 0) {
7120         printf("*** ERROR: unable to insert opcode "
7121                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7122         return -1;
7123     }
7124 
7125     return 0;
7126 }
7127 
7128 static int register_trplind_insn(opc_handler_t **ppc_opcodes,
7129                                  unsigned char idx1, unsigned char idx2,
7130                                  unsigned char idx3, unsigned char idx4,
7131                                  opc_handler_t *handler)
7132 {
7133     opc_handler_t **table;
7134 
7135     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7136         printf("*** ERROR: unable to join indirect table idx "
7137                "[%02x-%02x]\n", idx1, idx2);
7138         return -1;
7139     }
7140     table = ind_table(ppc_opcodes[idx1]);
7141     if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
7142         printf("*** ERROR: unable to join 2nd-level indirect table idx "
7143                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7144         return -1;
7145     }
7146     table = ind_table(table[idx2]);
7147     if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
7148         printf("*** ERROR: unable to insert opcode "
7149                "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
7150         return -1;
7151     }
7152     return 0;
7153 }
7154 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7155 {
7156     if (insn->opc2 != 0xFF) {
7157         if (insn->opc3 != 0xFF) {
7158             if (insn->opc4 != 0xFF) {
7159                 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7160                                           insn->opc3, insn->opc4,
7161                                           &insn->handler) < 0) {
7162                     return -1;
7163                 }
7164             } else {
7165                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7166                                          insn->opc3, &insn->handler) < 0) {
7167                     return -1;
7168                 }
7169             }
7170         } else {
7171             if (register_ind_insn(ppc_opcodes, insn->opc1,
7172                                   insn->opc2, &insn->handler) < 0) {
7173                 return -1;
7174             }
7175         }
7176     } else {
7177         if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7178             return -1;
7179         }
7180     }
7181 
7182     return 0;
7183 }
7184 
/*
 * Recursively count the valid handlers reachable from @table, pruning as
 * it goes: NULL slots are normalized to &invalid_handler, and any indirect
 * sub-table that turns out to contain no valid handlers is freed and its
 * slot reset to &invalid_handler. Returns the number of live entries in
 * @table (direct handlers plus non-empty sub-tables).
 */
static int test_opcode_table(opc_handler_t **table, int len)
{
    int i, count, tmp;

    for (i = 0, count = 0; i < len; i++) {
        /* Consistency fixup */
        if (table[i] == NULL) {
            table[i] = &invalid_handler;
        }
        if (table[i] != &invalid_handler) {
            if (is_indirect_opcode(table[i])) {
                /* Recurse into the sub-table; prune it if fully empty. */
                tmp = test_opcode_table(ind_table(table[i]),
                    PPC_CPU_INDIRECT_OPCODES_LEN);
                if (tmp == 0) {
                    /*
                     * NOTE(review): this frees the tagged pointer value;
                     * presumably g_free tolerates the low PPC_INDIRECT bit
                     * or allocation alignment makes it moot — the free
                     * paths in destroy_ppc_opcodes() mask the bit first.
                     */
                    g_free(table[i]);
                    table[i] = &invalid_handler;
                } else {
                    count++;
                }
            } else {
                count++;
            }
        }
    }

    return count;
}
7212 
7213 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7214 {
7215     if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7216         printf("*** WARNING: no opcode defined !\n");
7217     }
7218 }
7219 
7220 /*****************************************************************************/
7221 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7222 {
7223     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7224     opcode_t *opc;
7225 
7226     fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7227     for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7228         if (((opc->handler.type & pcc->insns_flags) != 0) ||
7229             ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7230             if (register_insn(cpu->opcodes, opc) < 0) {
7231                 error_setg(errp, "ERROR initializing PowerPC instruction "
7232                            "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7233                            opc->opc3);
7234                 return;
7235             }
7236         }
7237     }
7238     fix_opcode_tables(cpu->opcodes);
7239     fflush(stdout);
7240     fflush(stderr);
7241 }
7242 
/*
 * Free all indirect sub-tables hanging off cpu->opcodes. The tables form
 * up to four levels (opc1/opc2/opc3/opc4); entries are tagged pointers, so
 * each free masks off the PPC_INDIRECT bit to recover the real allocation.
 * Direct handler entries point at static opc_handler_t data and are not
 * freed. The top-level cpu->opcodes array itself is owned by the CPU.
 */
void destroy_ppc_opcodes(PowerPCCPU *cpu)
{
    opc_handler_t **table, **table_2;
    int i, j, k;

    for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
        if (cpu->opcodes[i] == &invalid_handler) {
            continue;
        }
        if (is_indirect_opcode(cpu->opcodes[i])) {
            /* Level 2 table for this opc1. */
            table = ind_table(cpu->opcodes[i]);
            for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
                if (table[j] == &invalid_handler) {
                    continue;
                }
                if (is_indirect_opcode(table[j])) {
                    /* Level 3 table for this opc2. */
                    table_2 = ind_table(table[j]);
                    for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
                        /* Free any level-4 tables before their parent. */
                        if (table_2[k] != &invalid_handler &&
                            is_indirect_opcode(table_2[k])) {
                            g_free((opc_handler_t *)((uintptr_t)table_2[k] &
                                                     ~PPC_INDIRECT));
                        }
                    }
                    g_free((opc_handler_t *)((uintptr_t)table[j] &
                                             ~PPC_INDIRECT));
                }
            }
            g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
                ~PPC_INDIRECT));
        }
    }
}
7276 
7277 int ppc_fixup_cpu(PowerPCCPU *cpu)
7278 {
7279     CPUPPCState *env = &cpu->env;
7280 
7281     /*
7282      * TCG doesn't (yet) emulate some groups of instructions that are
7283      * implemented on some otherwise supported CPUs (e.g. VSX and
7284      * decimal floating point instructions on POWER7).  We remove
7285      * unsupported instruction groups from the cpu state's instruction
7286      * masks and hope the guest can cope.  For at least the pseries
7287      * machine, the unavailability of these instructions can be
7288      * advertised to the guest via the device tree.
7289      */
7290     if ((env->insns_flags & ~PPC_TCG_INSNS)
7291         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7292         warn_report("Disabling some instructions which are not "
7293                     "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7294                     env->insns_flags & ~PPC_TCG_INSNS,
7295                     env->insns_flags2 & ~PPC_TCG_INSNS2);
7296     }
7297     env->insns_flags &= PPC_TCG_INSNS;
7298     env->insns_flags2 &= PPC_TCG_INSNS2;
7299     return 0;
7300 }
7301 
/*
 * Decode and emit code for one instruction via the legacy opcode tables.
 * Walks up to four table levels keyed by opc1..opc4, validates the
 * instruction against the handler's invalid-bits mask, then invokes the
 * handler. Returns false (without emitting) if the opcode is invalid or
 * has reserved bits set; returns true once the handler has run.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Descend through the tagged-pointer tables until a leaf handler. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions with Rc set use an alternate invalid-bits mask. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject instructions with reserved bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
7357 
/*
 * TranslatorOps.init_disas_context hook: unpack the TB's hflags word and
 * relevant CPU state into the DisasContext fields used during translation.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cpu_env(cs);
    /* All translation-relevant CPU mode bits were folded into tb->flags. */
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    /* -1 marks "no access type recorded yet" for this instruction. */
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    /* Facility-availability bits. */
    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    /* PMU-related bits used when translating performance-monitor accesses. */
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
    ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        /* MSR[SE]: single-step mode -- translate one insn per TB. */
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        /* MSR[BE]: branch-trace mode. */
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
7405 
/* TranslatorOps.tb_start hook: nothing to emit at TB start for PPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7409 
/* TranslatorOps.insn_start hook: record the insn's PC for restore_state. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7414 
/*
 * Return true if @insn is the first word of a prefixed (64-bit) ISA v3.1
 * instruction, i.e. primary opcode 1. NOTE(review): REQUIRE_INSNS_FLAGS2
 * presumably returns false from this function when the CPU lacks ISA310,
 * so pre-v3.1 CPUs never treat opcode 1 as a prefix -- confirm the macro.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7420 
/*
 * TranslatorOps.translate_insn hook: fetch one (possibly prefixed)
 * instruction, try the decodetree decoders first and fall back to the
 * legacy opcode tables, emitting an invalid-op exception if both fail.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cpu_env(cs);
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* cia = "current instruction address", used in diagnostics. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        /* 32-bit insn: decodetree first, then the legacy tables. */
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Prefixed insn: fetch the suffix word and decode all 64 bits. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
7464 
7465 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7466 {
7467     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7468     DisasJumpType is_jmp = ctx->base.is_jmp;
7469     target_ulong nip = ctx->base.pc_next;
7470 
7471     if (is_jmp == DISAS_NORETURN) {
7472         /* We have already exited the TB. */
7473         return;
7474     }
7475 
7476     /* Honor single stepping. */
7477     if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)) {
7478         bool rfi_type = false;
7479 
7480         switch (is_jmp) {
7481         case DISAS_TOO_MANY:
7482         case DISAS_EXIT_UPDATE:
7483         case DISAS_CHAIN_UPDATE:
7484             gen_update_nip(ctx, nip);
7485             break;
7486         case DISAS_EXIT:
7487         case DISAS_CHAIN:
7488             /*
7489              * This is a heuristic, to put it kindly. The rfi class of
7490              * instructions are among the few outside branches that change
7491              * NIP without taking an interrupt. Single step trace interrupts
7492              * do not fire on completion of these instructions.
7493              */
7494             rfi_type = true;
7495             break;
7496         default:
7497             g_assert_not_reached();
7498         }
7499 
7500         gen_debug_exception(ctx, rfi_type);
7501         return;
7502     }
7503 
7504     switch (is_jmp) {
7505     case DISAS_TOO_MANY:
7506         if (use_goto_tb(ctx, nip)) {
7507             pmu_count_insns(ctx);
7508             tcg_gen_goto_tb(0);
7509             gen_update_nip(ctx, nip);
7510             tcg_gen_exit_tb(ctx->base.tb, 0);
7511             break;
7512         }
7513         /* fall through */
7514     case DISAS_CHAIN_UPDATE:
7515         gen_update_nip(ctx, nip);
7516         /* fall through */
7517     case DISAS_CHAIN:
7518         /*
7519          * tcg_gen_lookup_and_goto_ptr will exit the TB if
7520          * CF_NO_GOTO_PTR is set. Count insns now.
7521          */
7522         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7523             pmu_count_insns(ctx);
7524         }
7525 
7526         tcg_gen_lookup_and_goto_ptr();
7527         break;
7528 
7529     case DISAS_EXIT_UPDATE:
7530         gen_update_nip(ctx, nip);
7531         /* fall through */
7532     case DISAS_EXIT:
7533         pmu_count_insns(ctx);
7534         tcg_gen_exit_tb(NULL, 0);
7535         break;
7536 
7537     default:
7538         g_assert_not_reached();
7539     }
7540 }
7541 
/*
 * TranslatorOps.disas_log hook: print the symbol at the TB's start PC and
 * a guest disassembly of the whole translated block to @logfile.
 */
static void ppc_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
7548 
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7557 
/*
 * Entry point from the TCG core: translate the TB at @pc by running the
 * generic translator loop with the PPC hook table above.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
7565