xref: /openbmc/qemu/target/ppc/translate.c (revision f16d15c9)
1 /*
2  *  PowerPC emulation for qemu: main translation routines.
3  *
4  *  Copyright (c) 2003-2007 Jocelyn Mayer
5  *  Copyright (C) 2011 Freescale Semiconductor, Inc.
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
26 #include "tcg/tcg-op.h"
27 #include "tcg/tcg-op-gvec.h"
28 #include "qemu/host-utils.h"
29 #include "qemu/main-loop.h"
30 #include "exec/cpu_ldst.h"
31 
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
34 
35 #include "exec/translator.h"
36 #include "exec/log.h"
37 #include "qemu/atomic128.h"
38 #include "spr_common.h"
39 
40 #include "qemu/qemu-print.h"
41 #include "qapi/error.h"
42 
43 #define CPU_SINGLE_STEP 0x1
44 #define CPU_BRANCH_STEP 0x2
45 
46 /* Include definitions for instructions classes and implementations flags */
47 /* #define PPC_DEBUG_DISAS */
48 
49 #ifdef PPC_DEBUG_DISAS
50 #  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
51 #else
52 #  define LOG_DISAS(...) do { } while (0)
53 #endif
54 /*****************************************************************************/
55 /* Code translation helpers                                                  */
56 
57 /* global register indexes */
58 static char cpu_reg_names[10 * 3 + 22 * 4   /* GPR */
59                           + 10 * 4 + 22 * 5 /* SPE GPRh */
60                           + 8 * 5           /* CRF */];
61 static TCGv cpu_gpr[32];
62 static TCGv cpu_gprh[32];
63 static TCGv_i32 cpu_crf[8];
64 static TCGv cpu_nip;
65 static TCGv cpu_msr;
66 static TCGv cpu_ctr;
67 static TCGv cpu_lr;
68 #if defined(TARGET_PPC64)
69 static TCGv cpu_cfar;
70 #endif
71 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
72 static TCGv cpu_reserve;
73 static TCGv cpu_reserve_val;
74 static TCGv cpu_fpscr;
75 static TCGv_i32 cpu_access_type;
76 
77 #include "exec/gen-icount.h"
78 
/*
 * Allocate the TCG globals that mirror architectural state in CPUPPCState:
 * CR fields, GPRs (and SPE high halves), and the special registers the
 * translator accesses directly.
 *
 * Register names are formatted into the static cpu_reg_names buffer because
 * TCG keeps only a pointer to each name string; 'p' walks that buffer and
 * the increments below must match the formatted lengths (incl. NUL).
 */
void ppc_translate_init(void)
{
    int i;
    char *p;
    size_t cpu_reg_names_size;

    p = cpu_reg_names;
    cpu_reg_names_size = sizeof(cpu_reg_names);

    /* Condition register fields "crf0".."crf7": 5 bytes each incl. NUL. */
    for (i = 0; i < 8; i++) {
        snprintf(p, cpu_reg_names_size, "crf%d", i);
        cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUPPCState, crf[i]), p);
        p += 5;
        cpu_reg_names_size -= 5;
    }

    /* GPRs "r0".."r31" and SPE high halves "r0H".."r31H". */
    for (i = 0; i < 32; i++) {
        snprintf(p, cpu_reg_names_size, "r%d", i);
        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUPPCState, gpr[i]), p);
        /* "rN" is 3 bytes with NUL, "rNN" is 4 */
        p += (i < 10) ? 3 : 4;
        cpu_reg_names_size -= (i < 10) ? 3 : 4;
        snprintf(p, cpu_reg_names_size, "r%dH", i);
        cpu_gprh[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUPPCState, gprh[i]), p);
        /* "rNH" is 4 bytes with NUL, "rNNH" is 5 */
        p += (i < 10) ? 4 : 5;
        cpu_reg_names_size -= (i < 10) ? 4 : 5;
    }

    cpu_nip = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, nip), "nip");

    cpu_msr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, msr), "msr");

    cpu_ctr = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, ctr), "ctr");

    cpu_lr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, lr), "lr");

#if defined(TARGET_PPC64)
    cpu_cfar = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, cfar), "cfar");
#endif

    /* XER flag bits (SO/OV/CA and the ISA 3.00 32-bit variants) are kept
     * split out of env->xer; see spr_read_xer/spr_write_xer. */
    cpu_xer = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUPPCState, xer), "xer");
    cpu_so = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, so), "SO");
    cpu_ov = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ov), "OV");
    cpu_ca = tcg_global_mem_new(cpu_env,
                                offsetof(CPUPPCState, ca), "CA");
    cpu_ov32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ov32), "OV32");
    cpu_ca32 = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUPPCState, ca32), "CA32");

    cpu_reserve = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_addr),
                                     "reserve_addr");
    cpu_reserve_val = tcg_global_mem_new(cpu_env,
                                     offsetof(CPUPPCState, reserve_val),
                                     "reserve_val");

    cpu_fpscr = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUPPCState, fpscr), "fpscr");

    cpu_access_type = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPUPPCState, access_type),
                                             "access_type");
}
153 
/* internal defines */
/* Per-translation-block state of the PPC front end disassembler. */
struct DisasContext {
    DisasContextBase base;
    target_ulong cia;  /* current instruction address */
    uint32_t opcode;
    /* Cached MSR bits relevant to translation (problem state, hypervisor,
     * data relocation, little-endian) */
    bool pr, hv, dr, le_mode;
    bool lazy_tlb_flush;
    bool need_access_type;  /* whether env->access_type must be maintained */
    int mem_idx;
    int access_type;        /* last access type written to env (cache) */
    /* Translation flags */
    MemOp default_tcg_memop_mask;
#if defined(TARGET_PPC64)
    bool sf_mode;           /* 64-bit mode; see NARROW_MODE() */
    bool has_cfar;
#endif
    bool fpu_enabled;
    bool altivec_enabled;
    bool vsx_enabled;
    bool spe_enabled;
    bool tm_enabled;
    bool gtse;
    bool hr;
    bool mmcr0_pmcc0;
    bool mmcr0_pmcc1;
    bool pmu_insn_cnt;
    ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
    int singlestep_enabled;
    uint32_t flags;
    uint64_t insns_flags;
    uint64_t insns_flags2;
};
187 
188 #define DISAS_EXIT         DISAS_TARGET_0  /* exit to main loop, pc updated */
189 #define DISAS_EXIT_UPDATE  DISAS_TARGET_1  /* exit to main loop, pc stale */
190 #define DISAS_CHAIN        DISAS_TARGET_2  /* lookup next tb, pc updated */
191 #define DISAS_CHAIN_UPDATE DISAS_TARGET_3  /* lookup next tb, pc stale */
192 
/* Return true iff byteswap is needed in a scalar memop */
static inline bool need_byteswap(const DisasContext *ctx)
{
    /*
     * A swap is needed whenever the guest's current endianness (le_mode,
     * cached from MSR) differs from the target's compiled-in endianness.
     */
#if TARGET_BIG_ENDIAN
     return ctx->le_mode;
#else
     return !ctx->le_mode;
#endif
}
202 
203 /* True when active word size < size of target_long.  */
204 #ifdef TARGET_PPC64
205 # define NARROW_MODE(C)  (!(C)->sf_mode)
206 #else
207 # define NARROW_MODE(C)  0
208 #endif
209 
/* Decode-table entry describing one opcode variant and its handler. */
struct opc_handler_t {
    /* invalid bits for instruction 1 (Rc(opcode) == 0) */
    uint32_t inval1;
    /* invalid bits for instruction 2 (Rc(opcode) == 1) */
    uint32_t inval2;
    /* instruction type */
    uint64_t type;
    /* extended instruction type */
    uint64_t type2;
    /* handler */
    void (*handler)(DisasContext *ctx);
};
222 
/* SPR load/store helpers */
/* Copy env->spr[reg] into TCG value 't'. */
static inline void gen_load_spr(TCGv t, int reg)
{
    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/* Store TCG value 't' into env->spr[reg]. */
static inline void gen_store_spr(int reg, TCGv t)
{
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
}

/*
 * Record the type of the upcoming memory access in env->access_type.
 * The write is skipped when the CPU does not need the information or
 * when the cached value already matches.
 */
static inline void gen_set_access_type(DisasContext *ctx, int access_type)
{
    if (ctx->need_access_type && ctx->access_type != access_type) {
        tcg_gen_movi_i32(cpu_access_type, access_type);
        ctx->access_type = access_type;
    }
}
241 
242 static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
243 {
244     if (NARROW_MODE(ctx)) {
245         nip = (uint32_t)nip;
246     }
247     tcg_gen_movi_tl(cpu_nip, nip);
248 }
249 
250 static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
251 {
252     TCGv_i32 t0, t1;
253 
254     /*
255      * These are all synchronous exceptions, we set the PC back to the
256      * faulting instruction
257      */
258     gen_update_nip(ctx, ctx->cia);
259     t0 = tcg_const_i32(excp);
260     t1 = tcg_const_i32(error);
261     gen_helper_raise_exception_err(cpu_env, t0, t1);
262     tcg_temp_free_i32(t0);
263     tcg_temp_free_i32(t1);
264     ctx->base.is_jmp = DISAS_NORETURN;
265 }
266 
267 static void gen_exception(DisasContext *ctx, uint32_t excp)
268 {
269     TCGv_i32 t0;
270 
271     /*
272      * These are all synchronous exceptions, we set the PC back to the
273      * faulting instruction
274      */
275     gen_update_nip(ctx, ctx->cia);
276     t0 = tcg_const_i32(excp);
277     gen_helper_raise_exception(cpu_env, t0);
278     tcg_temp_free_i32(t0);
279     ctx->base.is_jmp = DISAS_NORETURN;
280 }
281 
282 static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
283                               target_ulong nip)
284 {
285     TCGv_i32 t0;
286 
287     gen_update_nip(ctx, nip);
288     t0 = tcg_const_i32(excp);
289     gen_helper_raise_exception(cpu_env, t0);
290     tcg_temp_free_i32(t0);
291     ctx->base.is_jmp = DISAS_NORETURN;
292 }
293 
/*
 * Prepare for an instruction that may perform I/O when icount is active:
 * open the I/O window and force this TB to end after this instruction.
 */
static void gen_icount_io_start(DisasContext *ctx)
{
    if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
        /*
         * An I/O instruction must be last in the TB.
         * Chain to the next TB, and let the code from gen_tb_start
         * decide if we need to return to the main loop.
         * Doing this first also allows this value to be overridden.
         */
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }
}
307 
308 /*
309  * Tells the caller what is the appropriate exception to generate and prepares
310  * SPR registers for this exception.
311  *
312  * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or
313  * POWERPC_EXCP_DEBUG (on BookE).
314  */
315 static uint32_t gen_prep_dbgex(DisasContext *ctx)
316 {
317     if (ctx->flags & POWERPC_FLAG_DE) {
318         target_ulong dbsr = 0;
319         if (ctx->singlestep_enabled & CPU_SINGLE_STEP) {
320             dbsr = DBCR0_ICMP;
321         } else {
322             /* Must have been branch */
323             dbsr = DBCR0_BRT;
324         }
325         TCGv t0 = tcg_temp_new();
326         gen_load_spr(t0, SPR_BOOKE_DBSR);
327         tcg_gen_ori_tl(t0, t0, dbsr);
328         gen_store_spr(SPR_BOOKE_DBSR, t0);
329         tcg_temp_free(t0);
330         return POWERPC_EXCP_DEBUG;
331     } else {
332         return POWERPC_EXCP_TRACE;
333     }
334 }
335 
/* Raise the debug/trace exception selected by gen_prep_dbgex. */
static void gen_debug_exception(DisasContext *ctx)
{
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx)));
    ctx->base.is_jmp = DISAS_NORETURN;
}
341 
/* Raise an invalid-instruction error. */
static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
}

/* Raise a privilege violation as a program check. */
static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
{
    gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
}

/* Raise a hypervisor privilege violation. */
static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
{
    /* Will be converted to program check if needed */
    gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
}
358 
359 /*****************************************************************************/
360 /* SPR READ/WRITE CALLBACKS */
361 
362 void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
363 {
364 #if 0
365     sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
366     printf("ERROR: try to access SPR %d !\n", sprn);
367 #endif
368 }
369 
/* #define PPC_DUMP_SPR_ACCESSES */

/*
 * Generic callbacks:
 * do nothing but store/retrieve spr value
 */
/* Optionally trace an SPR read (compiled out unless PPC_DUMP_SPR_ACCESSES). */
static void spr_load_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_const_i32(sprn);
    gen_helper_load_dump_spr(cpu_env, t0);
    tcg_temp_free_i32(t0);
#endif
}

/* Default mfspr handler: copy env->spr[sprn] into the destination GPR. */
void spr_read_generic(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}

/* Optionally trace an SPR write (compiled out unless PPC_DUMP_SPR_ACCESSES). */
static void spr_store_dump_spr(int sprn)
{
#ifdef PPC_DUMP_SPR_ACCESSES
    TCGv_i32 t0 = tcg_const_i32(sprn);
    gen_helper_store_dump_spr(cpu_env, t0);
    tcg_temp_free_i32(t0);
#endif
}

/* Default mtspr handler: copy the source GPR into env->spr[sprn]. */
void spr_write_generic(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn, cpu_gpr[gprn]);
    spr_store_dump_spr(sprn);
}
405 
/* mtspr handler for CTRL: generic store plus a forced TB break. */
void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);

    /*
     * SPR_CTRL writes must force a new translation block,
     * allowing the PMU to calculate the run latch events with
     * more accuracy.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
417 
#if !defined(CONFIG_USER_ONLY)
/*
 * mtspr handler that keeps only the low 32 bits of the source GPR on
 * 64-bit targets; identical to spr_write_generic on 32-bit targets.
 */
void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
{
#ifdef TARGET_PPC64
    TCGv t0 = tcg_temp_new();
    tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
    gen_store_spr(sprn, t0);
    tcg_temp_free(t0);
    spr_store_dump_spr(sprn);
#else
    spr_write_generic(ctx, sprn, gprn);
#endif
}

/*
 * mtspr handler with clear semantics: the SPR is ANDed with the
 * negation of the source GPR.
 * NOTE(review): the mask is built with neg (~x + 1) rather than not (~x);
 * for a multi-bit source this does not clear exactly the bits that are
 * set.  This matches the historical behavior -- confirm against the SPRs
 * registered with this callback before changing it.
 */
void spr_write_clear(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    gen_load_spr(t0, sprn);
    tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
    tcg_gen_and_tl(t0, t0, t1);
    gen_store_spr(sprn, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}

/* Access callback that intentionally generates no code. */
void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
{
}

#endif
449 
/* SPR common to all PowerPC */
/* XER */
/*
 * Reassemble the architectural XER image: env->xer holds the bits not
 * tracked separately, and SO/OV/CA (plus OV32/CA32 on ISA 3.00) are
 * ORed back into their bit positions.
 */
void spr_read_xer(DisasContext *ctx, int gprn, int sprn)
{
    TCGv dst = cpu_gpr[gprn];
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();
    tcg_gen_mov_tl(dst, cpu_xer);
    tcg_gen_shli_tl(t0, cpu_so, XER_SO);
    tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
    tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_or_tl(dst, dst, t2);
    tcg_gen_or_tl(dst, dst, t0);
    if (is_isa300(ctx)) {
        tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32);
        tcg_gen_or_tl(dst, dst, t0);
        tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32);
        tcg_gen_or_tl(dst, dst, t0);
    }
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
475 
/*
 * Split a value written to XER: the separately-tracked flag bits are
 * extracted into cpu_so/cpu_ov/cpu_ca (and the 32-bit variants) and
 * masked out of the copy kept in cpu_xer.
 */
void spr_write_xer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv src = cpu_gpr[gprn];
    /* Write all flags, while reading back check for isa300 */
    tcg_gen_andi_tl(cpu_xer, src,
                    ~((1u << XER_SO) |
                      (1u << XER_OV) | (1u << XER_OV32) |
                      (1u << XER_CA) | (1u << XER_CA32)));
    tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1);
    tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1);
    tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
    tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
    tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
490 
/* LR */
/* mfspr LR: copy the link register into the destination GPR. */
void spr_read_lr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
}

/* mtspr LR: copy the source GPR into the link register. */
void spr_write_lr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
}

/* CFAR */
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* mfspr CFAR: copy the come-from address register into the GPR. */
void spr_read_cfar(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
}

/* mtspr CFAR: copy the source GPR into the come-from address register. */
void spr_write_cfar(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */

/* CTR */
/* mfspr CTR: copy the count register into the destination GPR. */
void spr_read_ctr(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
}

/* mtspr CTR: copy the source GPR into the count register. */
void spr_write_ctr(DisasContext *ctx, int sprn, int gprn)
{
    tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
}
525 
/* User read access to SPR */
/* USPRx */
/* UMMCRx */
/* UPMCx */
/* USIA */
/* UDECR */
/* Read the privileged counterpart of a user-mode SPR (number + 0x10). */
void spr_read_ureg(DisasContext *ctx, int gprn, int sprn)
{
    gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
}

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
/* Write the privileged counterpart of a user-mode SPR (number + 0x10). */
void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
{
    gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
}
#endif
543 
/* SPR common to all non-embedded PowerPC */
/* DECR */
#if !defined(CONFIG_USER_ONLY)
/* DECR depends on virtual time: go through a helper inside an icount
 * I/O window. */
void spr_read_decr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
}

void spr_write_decr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
}
#endif
559 
/* SPR common to all non-embedded PowerPC, except 601 */
/* Time base */
/* Time-base accesses depend on virtual time, so they run through helpers
 * inside an icount I/O window (except the alternate time base, which
 * does not open one here). */
void spr_read_tbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_tbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbl(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
}

void spr_read_atbu(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
}

#if !defined(CONFIG_USER_ONLY)
void spr_write_tbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbl(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
}

void spr_write_atbu(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
}
606 
#if defined(TARGET_PPC64)
/* PURR/HDECR/VTB/TBU40 also depend on virtual time: helper calls inside
 * an icount I/O window. */
void spr_read_purr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
}

void spr_write_purr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_purr(cpu_env, cpu_gpr[gprn]);
}

/* HDECR */
void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
}

void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
}

void spr_read_vtb(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_vtb(cpu_gpr[gprn], cpu_env);
}

void spr_write_vtb(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]);
}

void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]);
}

#endif
#endif
653 
#if !defined(CONFIG_USER_ONLY)
/* IBAT0U...IBAT7U */
/* IBAT0L...IBAT7L */
/*
 * BAT register accessors.  Upper/lower halves alternate in SPR numbers,
 * so (sprn & 1) selects U vs L and (sprn - base) / 2 gives the BAT index;
 * the "_h" variants handle the second group (BAT4..7) at offset +4.
 * Reads come straight from env; writes go through helpers that also
 * update the MMU state.
 */
void spr_read_ibat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
}

void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}

void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
    gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
    gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

/* DBAT0U...DBAT7U */
/* DBAT0L...DBAT7L */
void spr_read_dbat(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
}

void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env,
                  offsetof(CPUPPCState,
                           DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
}

void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
    gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
    gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}

/* SDR1 */
/* SDR1 writes go through a helper (hash-table base affects the MMU). */
void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
}
748 
#if defined(TARGET_PPC64)
/* 64 bits PowerPC specific SPRs */
/* PIDR */
/* PIDR/LPIDR writes affect translation state and go through helpers. */
void spr_write_pidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]);
}

/* HIOR is backed by env->excp_prefix. */
void spr_read_hior(DisasContext *ctx, int gprn, int sprn)
{
    tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
}

void spr_write_hior(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* keep only the implemented offset bits of the written value */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    tcg_temp_free(t0);
}
/* Partition-table control and processor compatibility registers. */
void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_pcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]);
}

/* DPDES */
void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn)
{
    gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env);
}

void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]);
}
#endif
#endif
796 
/* PowerPC 40x specific registers */
#if !defined(CONFIG_USER_ONLY)
/* 40x timer/debug SPRs depend on virtual time or device state, so they
 * run through helpers inside an icount I/O window. */
void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn)
{
    gen_icount_io_start(ctx);
    gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
}

void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_store_spr(sprn, cpu_gpr[gprn]);
    gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
    /* We must stop translation as we may have rebooted */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
}

/* Only the low 8 bits of the 40x PID are implemented. */
void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
    gen_helper_store_40x_pid(cpu_env, t0);
    tcg_temp_free(t0);
}

void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
}

void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn)
{
    gen_icount_io_start(ctx);
    gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
}
#endif
858 
/* PIR */
#if !defined(CONFIG_USER_ONLY)
/* Only the low 4 bits of PIR are writable. */
void spr_write_pir(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
    gen_store_spr(SPR_PIR, t0);
    tcg_temp_free(t0);
}
#endif
869 
/* SPE specific registers */
/* SPEFSCR is stored as a 32-bit field (env->spe_fscr); widen on read,
 * truncate on write. */
void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
    tcg_temp_free_i32(t0);
}

void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
    tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
    tcg_temp_free_i32(t0);
}
886 
#if !defined(CONFIG_USER_ONLY)
/* Callback used to write the exception vector base */
void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    /* mask the written value with the implemented IVPR bits */
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
    gen_store_spr(sprn, t0);
    tcg_temp_free(t0);
}

/*
 * Write a BookE IVORn: map the SPR number to the vector index, mask the
 * value with ivor_mask, and update both excp_vectors[] and the SPR.
 * An unknown IVOR SPR raises an invalid-instruction error.
 */
void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn)
{
    int sprn_offs;

    if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
        sprn_offs = sprn - SPR_BOOKE_IVOR0;
    } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
        sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
    } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
        sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "Trying to write an unknown exception"
                      " vector 0x%03x\n", sprn);
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    TCGv t0 = tcg_temp_new();
    tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
    tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
    tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
    gen_store_spr(sprn, t0);
    tcg_temp_free(t0);
}
#endif
924 
#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
/*
 * Write AMR, inserting only the bits permitted by UAMOR (problem state)
 * or AMOR (privileged non-HV state).
 */
void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 PR=0 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    if (ctx->pr) {
        gen_load_spr(t1, SPR_UAMOR);
    } else {
        gen_load_spr(t1, SPR_AMOR);
    }

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_AMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_AMR, t0);
    spr_store_dump_spr(SPR_AMR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}

/* Write UAMOR, inserting only the bits permitted by AMOR. */
void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_UAMOR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_UAMOR, t0);
    spr_store_dump_spr(SPR_UAMOR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}

/* Write IAMR, inserting only the bits permitted by AMOR. */
void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv t2 = tcg_temp_new();

    /*
     * Note, the HV=1 case is handled earlier by simply using
     * spr_write_generic for HV mode in the SPR table
     */

    /* Build insertion mask into t1 based on context */
    gen_load_spr(t1, SPR_AMOR);

    /* Mask new bits into t2 */
    tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);

    /* Load AMR and clear new bits in t0 */
    gen_load_spr(t0, SPR_IAMR);
    tcg_gen_andc_tl(t0, t0, t1);

    /* Or'in new bits and write it out */
    tcg_gen_or_tl(t0, t0, t2);
    gen_store_spr(SPR_IAMR, t0);
    spr_store_dump_spr(SPR_IAMR);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
}
#endif
#endif
1025 
1026 #ifndef CONFIG_USER_ONLY
void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
{
    /* Let the helper bring the THRM state up to date before reading */
    gen_helper_fixup_thrm(cpu_env);
    gen_load_spr(cpu_gpr[gprn], sprn);
    spr_load_dump_spr(sprn);
}
1033 #endif /* !CONFIG_USER_ONLY */
1034 
1035 #if !defined(CONFIG_USER_ONLY)
1036 void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn)
1037 {
1038     TCGv t0 = tcg_temp_new();
1039 
1040     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
1041     gen_store_spr(sprn, t0);
1042     tcg_temp_free(t0);
1043 }
1044 
1045 void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
1046 {
1047     TCGv t0 = tcg_temp_new();
1048 
1049     tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
1050     gen_store_spr(sprn, t0);
1051     tcg_temp_free(t0);
1052 }
1053 
1054 void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn)
1055 {
1056     TCGv t0 = tcg_temp_new();
1057 
1058     tcg_gen_andi_tl(t0, cpu_gpr[gprn],
1059                     ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC));
1060     gen_store_spr(sprn, t0);
1061     tcg_temp_free(t0);
1062 }
1063 
void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn)
{
    /* MMUCSR0 writes are handled entirely by the TLB-flush helper */
    gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
}
1068 
void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn)
{
    /* Pass the SPR number so the helper knows which PIDn is targeted */
    TCGv_i32 t0 = tcg_const_i32(sprn);
    gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
    tcg_temp_free_i32(t0);
}
/* EPLC write: delegated to a helper (see helper_booke_set_eplc) */
void spr_write_eplc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]);
}
/* EPSC write: delegated to a helper (see helper_booke_set_epsc) */
void spr_write_epsc(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]);
}
1083 
1084 #endif
1085 
1086 #if !defined(CONFIG_USER_ONLY)
1087 void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
1088 {
1089     TCGv val = tcg_temp_new();
1090     tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
1091     gen_store_spr(SPR_BOOKE_MAS3, val);
1092     tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
1093     gen_store_spr(SPR_BOOKE_MAS7, val);
1094     tcg_temp_free(val);
1095 }
1096 
1097 void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
1098 {
1099     TCGv mas7 = tcg_temp_new();
1100     TCGv mas3 = tcg_temp_new();
1101     gen_load_spr(mas7, SPR_BOOKE_MAS7);
1102     tcg_gen_shli_tl(mas7, mas7, 32);
1103     gen_load_spr(mas3, SPR_BOOKE_MAS3);
1104     tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
1105     tcg_temp_free(mas3);
1106     tcg_temp_free(mas7);
1107 }
1108 
1109 #endif
1110 
1111 #ifdef TARGET_PPC64
1112 static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
1113                                     int bit, int sprn, int cause)
1114 {
1115     TCGv_i32 t1 = tcg_const_i32(bit);
1116     TCGv_i32 t2 = tcg_const_i32(sprn);
1117     TCGv_i32 t3 = tcg_const_i32(cause);
1118 
1119     gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
1120 
1121     tcg_temp_free_i32(t3);
1122     tcg_temp_free_i32(t2);
1123     tcg_temp_free_i32(t1);
1124 }
1125 
/*
 * Like gen_fscr_facility_check, but for facilities gated by an MSR bit
 * (e.g. MSR[TM]); the check itself happens in the helper at run time.
 * ctx and facility_sprn are currently unused.
 */
static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
                                   int bit, int sprn, int cause)
{
    TCGv_i32 t1 = tcg_const_i32(bit);
    TCGv_i32 t2 = tcg_const_i32(sprn);
    TCGv_i32 t3 = tcg_const_i32(cause);

    gen_helper_msr_facility_check(cpu_env, t1, t2, t3);

    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
}
1139 
1140 void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
1141 {
1142     TCGv spr_up = tcg_temp_new();
1143     TCGv spr = tcg_temp_new();
1144 
1145     gen_load_spr(spr, sprn - 1);
1146     tcg_gen_shri_tl(spr_up, spr, 32);
1147     tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
1148 
1149     tcg_temp_free(spr);
1150     tcg_temp_free(spr_up);
1151 }
1152 
1153 void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
1154 {
1155     TCGv spr = tcg_temp_new();
1156 
1157     gen_load_spr(spr, sprn - 1);
1158     tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
1159     gen_store_spr(sprn - 1, spr);
1160 
1161     tcg_temp_free(spr);
1162 }
1163 
1164 #if !defined(CONFIG_USER_ONLY)
void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
{
    TCGv hmer = tcg_temp_new();

    /* ANDing with the old value means a write can only clear bits */
    gen_load_spr(hmer, sprn);
    tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
    gen_store_spr(sprn, hmer);
    spr_store_dump_spr(sprn);
    tcg_temp_free(hmer);
}
1175 
/* LPCR writes are handled entirely by the helper */
void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
    gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
1180 #endif /* !defined(CONFIG_USER_ONLY) */
1181 
/* TAR access is gated by FSCR[TAR]; check, then do a plain read */
void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_read_generic(ctx, gprn, sprn);
}

/* Same FSCR[TAR] gate as the read side, then a plain write */
void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
    spr_write_generic(ctx, sprn, gprn);
}
1193 
/* TM SPR access is gated by MSR[TM]; check, then do a plain read */
void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_generic(ctx, gprn, sprn);
}

/* Same MSR[TM] gate as the read side, then a plain write */
void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_generic(ctx, sprn, gprn);
}
1205 
/* MSR[TM]-gated access to the upper half of the SPR at sprn - 1 */
void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1217 
/* EBB SPR access is gated by FSCR[EBB]; check, then do a plain read */
void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_generic(ctx, gprn, sprn);
}

/* Same FSCR[EBB] gate as the read side, then a plain write */
void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_generic(ctx, sprn, gprn);
}
1229 
/* FSCR[EBB]-gated access to the upper half of the SPR at sprn - 1 */
void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_read_prev_upper32(ctx, gprn, sprn);
}

void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
{
    gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
    spr_write_prev_upper32(ctx, sprn, gprn);
}
1241 #endif
1242 
/*
 * Shorthand wrappers around the GEN_OPCODE* initializer macros below.
 * The _E variants take an extra type2 flag word, the "2" variants pass
 * an explicit opcode-table name, and the _2 variants add a fourth
 * opcode field.
 */
#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type)                      \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2)             \
GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type)               \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)

#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2)      \
GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)

#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2)     \
GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)

#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
1260 
/* One entry of the opcode table: opcode bytes plus the decode handler */
typedef struct opcode_t {
    unsigned char opc1, opc2, opc3, opc4; /* up to four opcode levels */
#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
    unsigned char pad[4];
#endif
    opc_handler_t handler; /* invalid-bit masks, type flags, callback */
    const char *oname;     /* mnemonic, for diagnostics */
} opcode_t;
1269 
/* Raise a privileged-opcode program interrupt */
static void gen_priv_opc(DisasContext *ctx)
{
    gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
}
1274 
/* Helpers for priv. check: raise a privileged-opcode interrupt and bail out */
#define GEN_PRIV(CTX)              \
    do {                           \
        gen_priv_opc(CTX); return; \
    } while (0)

#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: every privileged context is unavailable */
#define CHK_HV(CTX) GEN_PRIV(CTX)
#define CHK_SV(CTX) GEN_PRIV(CTX)
#define CHK_HVRM(CTX) GEN_PRIV(CTX)
#else
/* Requires hypervisor state: not problem state (pr) and hv set */
#define CHK_HV(CTX)                         \
    do {                                    \
        if (unlikely(ctx->pr || !ctx->hv)) {\
            GEN_PRIV(CTX);                  \
        }                                   \
    } while (0)
/* Requires supervisor (or hypervisor) state: not problem state */
#define CHK_SV(CTX)              \
    do {                         \
        if (unlikely(ctx->pr)) { \
            GEN_PRIV(CTX);       \
        }                        \
    } while (0)
/* Requires hypervisor real mode: hv set, pr and dr (data relocate) clear */
#define CHK_HVRM(CTX)                                   \
    do {                                                \
        if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
            GEN_PRIV(CTX);                              \
        }                                               \
    } while (0)
#endif

/* No privilege check at all */
#define CHK_NONE(CTX)
1307 
1308 /*****************************************************************************/
1309 /* PowerPC instructions table                                                */
1310 
/*
 * opcode_t initializer macros.  All variants fill in the opcode bytes,
 * the invalid-bits mask(s), the type flags and the handler; they differ
 * in whether opc4 is used (GEN_OPCODE3/4), whether a second invalid
 * mask is supplied (_DUAL), and whether the table name is derived from
 * the handler name (stringify) or passed explicitly (GEN_OPCODE2/4).
 */
#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2)                    \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2)       \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl1,                                                     \
        .inval2  = invl2,                                                     \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2)             \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = 0xff,                                                             \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2)              \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = stringify(name),                                                 \
}
#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2)        \
{                                                                             \
    .opc1 = op1,                                                              \
    .opc2 = op2,                                                              \
    .opc3 = op3,                                                              \
    .opc4 = op4,                                                              \
    .handler = {                                                              \
        .inval1  = invl,                                                      \
        .type = _typ,                                                         \
        .type2 = _typ2,                                                       \
        .handler = &gen_##name,                                               \
    },                                                                        \
    .oname = onam,                                                            \
}
1382 
/* Invalid instruction: raise an illegal-instruction program interrupt */
static void gen_invalid(DisasContext *ctx)
{
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
1388 
/*
 * Fallback opcode-table entry: every operand bit is flagged invalid and
 * the handler raises the illegal-instruction interrupt.
 */
static opc_handler_t invalid_handler = {
    .inval1  = 0xFFFFFFFF,
    .inval2  = 0xFFFFFFFF,
    .type    = PPC_NONE,
    .type2   = PPC_NONE,
    .handler = gen_invalid,
};
1396 
1397 /***                           Integer comparison                          ***/
1398 
1399 static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
1400 {
1401     TCGv t0 = tcg_temp_new();
1402     TCGv t1 = tcg_temp_new();
1403     TCGv_i32 t = tcg_temp_new_i32();
1404 
1405     tcg_gen_movi_tl(t0, CRF_EQ);
1406     tcg_gen_movi_tl(t1, CRF_LT);
1407     tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU),
1408                        t0, arg0, arg1, t1, t0);
1409     tcg_gen_movi_tl(t1, CRF_GT);
1410     tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU),
1411                        t0, arg0, arg1, t1, t0);
1412 
1413     tcg_gen_trunc_tl_i32(t, t0);
1414     tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
1415     tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t);
1416 
1417     tcg_temp_free(t0);
1418     tcg_temp_free(t1);
1419     tcg_temp_free_i32(t);
1420 }
1421 
/* Full-width compare of arg0 against an immediate; see gen_op_cmp */
static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
1428 
1429 static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
1430 {
1431     TCGv t0, t1;
1432     t0 = tcg_temp_new();
1433     t1 = tcg_temp_new();
1434     if (s) {
1435         tcg_gen_ext32s_tl(t0, arg0);
1436         tcg_gen_ext32s_tl(t1, arg1);
1437     } else {
1438         tcg_gen_ext32u_tl(t0, arg0);
1439         tcg_gen_ext32u_tl(t1, arg1);
1440     }
1441     gen_op_cmp(t0, t1, s, crf);
1442     tcg_temp_free(t1);
1443     tcg_temp_free(t0);
1444 }
1445 
/* 32-bit compare of arg0 against an immediate; see gen_op_cmp32 */
static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
{
    TCGv t0 = tcg_const_tl(arg1);
    gen_op_cmp32(arg0, t0, s, crf);
    tcg_temp_free(t0);
}
1452 
/*
 * Update CR0 from a signed compare of reg against zero (Rc=1 semantics).
 * In narrow (32-bit) mode only the low 32 bits of reg are considered.
 */
static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
{
    if (NARROW_MODE(ctx)) {
        gen_op_cmpi32(reg, 0, 1, 0);
    } else {
        gen_op_cmpi(reg, 0, 1, 0);
    }
}
1461 
/* cmprb - range comparison: isupper, isalpha, islower */
static void gen_cmprb(DisasContext *ctx)
{
    TCGv_i32 src1 = tcg_temp_new_i32();
    TCGv_i32 src2 = tcg_temp_new_i32();
    TCGv_i32 src2lo = tcg_temp_new_i32();
    TCGv_i32 src2hi = tcg_temp_new_i32();
    TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);

    /* src1 = byte under test; src2lo/src2hi = first range bounds from rB */
    tcg_gen_andi_i32(src1, src1, 0xFF);
    tcg_gen_ext8u_i32(src2lo, src2);
    tcg_gen_shri_i32(src2, src2, 8);
    tcg_gen_ext8u_i32(src2hi, src2);

    /* in-range = (src2lo <= src1) && (src1 <= src2hi) */
    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
    tcg_gen_and_i32(crf, src2lo, src2hi);

    /* L bit set: also test the second range held in rB bytes 2 and 3 */
    if (ctx->opcode & 0x00200000) {
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2lo, src2);
        tcg_gen_shri_i32(src2, src2, 8);
        tcg_gen_ext8u_i32(src2hi, src2);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
        tcg_gen_and_i32(src2lo, src2lo, src2hi);
        tcg_gen_or_i32(crf, crf, src2lo);
    }
    /* Result lands in the GT bit of the target CR field */
    tcg_gen_shli_i32(crf, crf, CRF_GT_BIT);
    tcg_temp_free_i32(src1);
    tcg_temp_free_i32(src2);
    tcg_temp_free_i32(src2lo);
    tcg_temp_free_i32(src2hi);
}
1499 
1500 #if defined(TARGET_PPC64)
/* cmpeqb - compare a byte against each byte of rB; done in a helper */
static void gen_cmpeqb(DisasContext *ctx)
{
    gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
}
1507 #endif
1508 
/* isel (PowerPC 2.03 specification) */
static void gen_isel(DisasContext *ctx)
{
    uint32_t bi = rC(ctx->opcode);
    uint32_t mask = 0x08 >> (bi & 0x03); /* bit position inside the CR field */
    TCGv t0 = tcg_temp_new();
    TCGv zr;

    /* Isolate the selected CR bit */
    tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
    tcg_gen_andi_tl(t0, t0, mask);

    /* rD = CR[bi] ? rA : rB, with rA == 0 meaning the constant zero */
    zr = tcg_const_tl(0);
    tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
                       rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
                       cpu_gpr[rB(ctx->opcode)]);
    tcg_temp_free(zr);
    tcg_temp_free(t0);
}
1527 
/* cmpb: PowerPC 2.05 specification - bytewise compare, done in a helper */
static void gen_cmpb(DisasContext *ctx)
{
    gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
}
1534 
1535 /***                           Integer arithmetic                          ***/
1536 
/*
 * Set XER[OV] (and OV32/SO) after an add (sub == 0) or subtract
 * (sub == 1).  arg0 is the result, arg1 and arg2 the operands; signed
 * overflow is derived purely from the sign bits of result and operands.
 */
static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
                                           TCGv arg1, TCGv arg2, int sub)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_xor_tl(cpu_ov, arg0, arg2);
    tcg_gen_xor_tl(t0, arg1, arg2);
    if (sub) {
        tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
    } else {
        tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
    }
    tcg_temp_free(t0);
    if (NARROW_MODE(ctx)) {
        /* 32-bit mode: the overflow indication lives in bit 31 */
        tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, cpu_ov);
        }
    } else {
        if (is_isa300(ctx)) {
            /* OV32 reflects the 32-bit overflow even in 64-bit mode */
            tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1);
        }
        tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
}
1563 
/*
 * Set ca32 to the carry out of bit 32, recovered from
 * res ^ arg0 ^ arg1 (arg1 inverted via eqv for subtraction).
 * No-op on pre-ISA-3.00 CPUs, which have no CA32.
 */
static inline void gen_op_arith_compute_ca32(DisasContext *ctx,
                                             TCGv res, TCGv arg0, TCGv arg1,
                                             TCGv ca32, int sub)
{
    TCGv t0;

    if (!is_isa300(ctx)) {
        return;
    }

    t0 = tcg_temp_new();
    if (sub) {
        tcg_gen_eqv_tl(t0, arg0, arg1);
    } else {
        tcg_gen_xor_tl(t0, arg0, arg1);
    }
    tcg_gen_xor_tl(t0, t0, res);
    tcg_gen_extract_tl(ca32, t0, 32, 1);
    tcg_temp_free(t0);
}
1584 
/*
 * Common add function: ret = arg1 + arg2 (+ ca when add_ca), optionally
 * computing XER[CA]/[CA32], XER[OV] and CR0.  ca/ca32 name the TCG
 * globals used for the carry (addex passes OV here instead of CA).
 * A scratch t0 shadows ret whenever flags are computed, so the operands
 * survive even if ret aliases one of them.
 */
static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
                                    TCGv arg2, TCGv ca, TCGv ca32,
                                    bool add_ca, bool compute_ca,
                                    bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv t1 = tcg_temp_new();
            tcg_gen_xor_tl(t1, arg1, arg2);        /* add without carry */
            tcg_gen_add_tl(t0, arg1, arg2);
            if (add_ca) {
                tcg_gen_add_tl(t0, t0, ca);
            }
            tcg_gen_xor_tl(ca, t0, t1);        /* bits changed w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(ca, ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(ca32, ca);
            }
        } else {
            /* Full-width add: let add2 produce the carry directly */
            TCGv zero = tcg_const_tl(0);
            if (add_ca) {
                tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero);
                tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero);
            } else {
                tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero);
            }
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0);
            tcg_temp_free(zero);
        }
    } else {
        tcg_gen_add_tl(t0, arg1, arg2);
        if (add_ca) {
            tcg_gen_add_tl(t0, t0, ca);
        }
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Add functions with two operands (rD = rA + rB) */
#define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov)     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
}
/* Add functions with one operand and one immediate (rD = rA + const) */
#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca,                    \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)],                           \
                     cpu_gpr[rA(ctx->opcode)], t0,                            \
                     ca, glue(ca, 32),                                        \
                     add_ca, compute_ca, compute_ov, Rc(ctx->opcode));        \
    tcg_temp_free(t0);                                                        \
}
1667 
/* add  add.  addo  addo. */
GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0)
GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1)
/* addc  addc.  addco  addco. */
GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0)
GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1)
/* adde  adde.  addeo  addeo. */
GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1)
/* addme  addme.  addmeo  addmeo.  */
GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1)
/* addex - note that the carry flows through OV rather than CA here */
GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0);
/* addze  addze.  addzeo  addzeo.*/
GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0)
GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1)
/* addic  addic. - rD = rA + SIMM, always computing CA */
static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
{
    TCGv c = tcg_const_tl(SIMM(ctx->opcode));
    gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                     c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0);
    tcg_temp_free(c);
}
1693 
/* addic: no CR0 update */
static void gen_addic(DisasContext *ctx)
{
    gen_op_addic(ctx, 0);
}

/* addic.: same, but CR0 is updated from the result */
static void gen_addic_(DisasContext *ctx)
{
    gen_op_addic(ctx, 1);
}
1703 
/*
 * 32-bit divide (divw/divwu family).  t2 becomes non-zero (== 1) for
 * the cases the ISA leaves undefined: divide by zero, and INT_MIN / -1
 * when signed.  In those cases the divisor is replaced by t2 so the
 * host division can never trap, and t2 feeds XER[OV]/[OV32] when
 * compute_ov is set.
 */
static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        /* t2 = (dividend == INT_MIN && divisor == -1) || divisor == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* Substitute a safe divisor (1) when the case is undefined */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    } else {
        /* t2 = (divisor == 0) */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i32(t3, 0);
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
    }
    if (compute_ov) {
        tcg_gen_extu_i32_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_extu_i32_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
/* Div functions: rD = rA / rB */
#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],      \
                     sign, compute_ov);                                       \
}
/* divwu  divwu.  divwuo  divwuo.   */
GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
/* divw  divw.  divwo  divwo.   */
GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);

/* div[wd]eu[o][.] - extended divides, computed in a helper; the i32
 * argument tells the helper whether to update OV */
#define GEN_DIVE(name, hlpr, compute_ov)                                      \
static void gen_##name(DisasContext *ctx)                                     \
{                                                                             \
    TCGv_i32 t0 = tcg_const_i32(compute_ov);                                  \
    gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env,                      \
                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
    tcg_temp_free_i32(t0);                                                    \
    if (unlikely(Rc(ctx->opcode) != 0)) {                                     \
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);                           \
    }                                                                         \
}

GEN_DIVE(divweu, divweu, 0);
GEN_DIVE(divweuo, divweu, 1);
GEN_DIVE(divwe, divwe, 0);
GEN_DIVE(divweo, divwe, 1);
1779 
1780 #if defined(TARGET_PPC64)
/*
 * 64-bit divide (divd/divdu family).  Mirrors gen_op_arith_divw: t2
 * flags the undefined cases (divide by zero, INT64_MIN / -1 when
 * signed), the divisor is replaced by t2 (== 1) in those cases to
 * avoid a host trap, and t2 feeds XER[OV]/[OV32] when compute_ov is set.
 */
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign, int compute_ov)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        /* t2 = (dividend == INT64_MIN && divisor == -1) || divisor == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_i64(ret, t0, t1);
    } else {
        /* t2 = (divisor == 0) */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
        tcg_gen_movi_i64(t3, 0);
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_divu_i64(ret, t0, t1);
    }
    if (compute_ov) {
        tcg_gen_mov_tl(cpu_ov, t2);
        if (is_isa300(ctx)) {
            tcg_gen_mov_tl(cpu_ov32, t2);
        }
        tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, ret);
    }
}
1822 
/*
 * GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov): front end for
 * gen_op_arith_divd(), wiring rD, rA and rB from the opcode fields.
 */
#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      sign, compute_ov);                                      \
}
/* divdu  divdu.  divduo  divduo.   */
GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
/* divd  divd.  divdo  divdo.   */
GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);

/* divdeu  divdeu.  divdeuo  divdeuo. */
GEN_DIVE(divdeu, divdeu, 0);
GEN_DIVE(divdeuo, divdeu, 1);
/* divde  divde.  divdeo  divdeo. */
GEN_DIVE(divde, divde, 0);
GEN_DIVE(divdeo, divde, 1);
1841 #endif
1842 
/*
 * 32-bit modulo: ret = arg1 % arg2, signed or unsigned.
 *
 * As with the divide helpers, the divisor is substituted with a safe
 * value before the host rem op when the operation would be invalid
 * (divide by zero, or INT_MIN % -1 for the signed form).  The modulo
 * instructions do not touch XER, so no OV handling here.
 */
static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    tcg_gen_trunc_tl_i32(t0, arg1);
    tcg_gen_trunc_tl_i32(t1, arg2);
    if (sign) {
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        /* t2 = (t0 == INT_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i32(t2, t2, t3);
        tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i32(t2, t2, t3);
        tcg_gen_movi_i32(t3, 0);
        /* If invalid, take the remainder by t2 (== 1) instead. */
        tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i32(t3, t0, t1);
        /* Sign-extend the 32-bit remainder into the target register. */
        tcg_gen_ext_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } else {
        /* Unsigned: only divide-by-zero is invalid; use divisor 1 then. */
        TCGv_i32 t2 = tcg_const_i32(1);
        TCGv_i32 t3 = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i32(t3, t0, t1);
        tcg_gen_extu_i32_tl(ret, t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}
1877 
/*
 * GEN_INT_ARITH_MODW(name, opc3, sign): front end for gen_op_arith_modw(),
 * wiring rD, rA and rB from the opcode fields.
 */
#define GEN_INT_ARITH_MODW(name, opc3, sign)                                \
static void glue(gen_, name)(DisasContext *ctx)                             \
{                                                                           \
    gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)],                        \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
                      sign);                                                \
}

/* moduw: unsigned word modulo */
GEN_INT_ARITH_MODW(moduw, 0x08, 0);
/* modsw: signed word modulo */
GEN_INT_ARITH_MODW(modsw, 0x18, 1);
1888 
1889 #if defined(TARGET_PPC64)
/*
 * 64-bit modulo: ret = arg1 % arg2, signed or unsigned.
 * Mirrors gen_op_arith_modw, with the invalid cases (divide by zero,
 * INT64_MIN % -1) neutralised by substituting the divisor before the
 * host rem op.  No XER update.
 */
static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, int sign)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Copy operands so ret may alias arg1/arg2. */
    tcg_gen_mov_i64(t0, arg1);
    tcg_gen_mov_i64(t1, arg2);
    if (sign) {
        TCGv_i64 t2 = tcg_temp_new_i64();
        TCGv_i64 t3 = tcg_temp_new_i64();
        /* t2 = (t0 == INT64_MIN && t1 == -1) || t1 == 0 */
        tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_i64(t2, t2, t3);
        tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_i64(t2, t2, t3);
        tcg_gen_movi_i64(t3, 0);
        /* If invalid, take the remainder by t2 (== 1) instead. */
        tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    } else {
        /* Unsigned: only divide-by-zero is invalid; use divisor 1 then. */
        TCGv_i64 t2 = tcg_const_i64(1);
        TCGv_i64 t3 = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
        tcg_gen_remu_i64(ret, t0, t1);
        tcg_temp_free_i64(t2);
        tcg_temp_free_i64(t3);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
1922 
1923 #define GEN_INT_ARITH_MODD(name, opc3, sign)                            \
1924 static void glue(gen_, name)(DisasContext *ctx)                           \
1925 {                                                                         \
1926   gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)],                        \
1927                     cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],   \
1928                     sign);                                                \
1929 }
1930 
1931 GEN_INT_ARITH_MODD(modud, 0x08, 0);
1932 GEN_INT_ARITH_MODD(modsd, 0x18, 1);
1933 #endif
1934 
1935 /* mulhw  mulhw. */
1936 static void gen_mulhw(DisasContext *ctx)
1937 {
1938     TCGv_i32 t0 = tcg_temp_new_i32();
1939     TCGv_i32 t1 = tcg_temp_new_i32();
1940 
1941     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1942     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1943     tcg_gen_muls2_i32(t0, t1, t0, t1);
1944     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1945     tcg_temp_free_i32(t0);
1946     tcg_temp_free_i32(t1);
1947     if (unlikely(Rc(ctx->opcode) != 0)) {
1948         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1949     }
1950 }
1951 
1952 /* mulhwu  mulhwu.  */
1953 static void gen_mulhwu(DisasContext *ctx)
1954 {
1955     TCGv_i32 t0 = tcg_temp_new_i32();
1956     TCGv_i32 t1 = tcg_temp_new_i32();
1957 
1958     tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
1959     tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
1960     tcg_gen_mulu2_i32(t0, t1, t0, t1);
1961     tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
1962     tcg_temp_free_i32(t0);
1963     tcg_temp_free_i32(t1);
1964     if (unlikely(Rc(ctx->opcode) != 0)) {
1965         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
1966     }
1967 }
1968 
/* mullw  mullw. */
static void gen_mullw(DisasContext *ctx)
{
    /*
     * rD = rA * rB on the word operands.  On 64-bit targets the low
     * words are sign-extended and a full 64-bit multiply is emitted;
     * on 32-bit targets a plain 32-bit multiply suffices.
     */
#if defined(TARGET_PPC64)
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
#else
    tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
                    cpu_gpr[rB(ctx->opcode)]);
#endif
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
1989 
/* mullwo  mullwo. */
static void gen_mullwo(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();

    /* Widening signed multiply: t0 = low word, t1 = high word. */
    tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
    tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_muls2_i32(t0, t1, t0, t1);
#if defined(TARGET_PPC64)
    /* On 64-bit targets rD receives the whole 64-bit product. */
    tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
#else
    tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
#endif

    /*
     * Overflow iff the high half is not the sign-extension of the low
     * half, i.e. the product does not fit in 32 signed bits.
     */
    tcg_gen_sari_i32(t0, t0, 31);
    tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
    tcg_gen_extu_i32_tl(cpu_ov, t0);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2019 
2020 /* mulli */
2021 static void gen_mulli(DisasContext *ctx)
2022 {
2023     tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2024                     SIMM(ctx->opcode));
2025 }
2026 
2027 #if defined(TARGET_PPC64)
2028 /* mulhd  mulhd. */
2029 static void gen_mulhd(DisasContext *ctx)
2030 {
2031     TCGv lo = tcg_temp_new();
2032     tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2033                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2034     tcg_temp_free(lo);
2035     if (unlikely(Rc(ctx->opcode) != 0)) {
2036         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2037     }
2038 }
2039 
2040 /* mulhdu  mulhdu. */
2041 static void gen_mulhdu(DisasContext *ctx)
2042 {
2043     TCGv lo = tcg_temp_new();
2044     tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
2045                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2046     tcg_temp_free(lo);
2047     if (unlikely(Rc(ctx->opcode) != 0)) {
2048         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2049     }
2050 }
2051 
2052 /* mulld  mulld. */
2053 static void gen_mulld(DisasContext *ctx)
2054 {
2055     tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2056                    cpu_gpr[rB(ctx->opcode)]);
2057     if (unlikely(Rc(ctx->opcode) != 0)) {
2058         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2059     }
2060 }
2061 
/* mulldo  mulldo. */
static void gen_mulldo(DisasContext *ctx)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Widening signed multiply: t0 = low 64 bits, t1 = high 64 bits. */
    tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rB(ctx->opcode)]);
    tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);

    /*
     * Overflow iff the high half differs from the sign-extension of the
     * low half, i.e. the product does not fit in 64 signed bits.
     */
    tcg_gen_sari_i64(t0, t0, 63);
    tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
    if (is_isa300(ctx)) {
        tcg_gen_mov_tl(cpu_ov32, cpu_ov);
    }
    tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
    }
}
2086 #endif
2087 
/* Common subf function */
/*
 * Emit ret = ~arg1 + arg2 [+ CA], i.e. the PPC subtract-from family,
 * optionally computing CA/CA32 (compute_ca), OV/OV32/SO (compute_ov)
 * and CR0 (compute_rc0).  add_ca selects the "extended" forms that
 * consume the incoming XER[CA].
 */
static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
                                     TCGv arg2, bool add_ca, bool compute_ca,
                                     bool compute_ov, bool compute_rc0)
{
    TCGv t0 = ret;

    /* Work in a scratch when ret may alias an input still needed below. */
    if (compute_ca || compute_ov) {
        t0 = tcg_temp_new();
    }

    if (compute_ca) {
        /* dest = ~arg1 + arg2 [+ ca].  */
        if (NARROW_MODE(ctx)) {
            /*
             * Caution: a non-obvious corner case of the spec is that
             * we must produce the *entire* 64-bit addition, but
             * produce the carry into bit 32.
             */
            TCGv inv1 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            if (add_ca) {
                tcg_gen_add_tl(t0, arg2, cpu_ca);
            } else {
                /* Non-extended form behaves as if CA == 1. */
                tcg_gen_addi_tl(t0, arg2, 1);
            }
            tcg_gen_xor_tl(t1, arg2, inv1);         /* add without carry */
            tcg_gen_add_tl(t0, t0, inv1);
            tcg_temp_free(inv1);
            tcg_gen_xor_tl(cpu_ca, t0, t1);         /* bits changes w/ carry */
            tcg_temp_free(t1);
            tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1);
            if (is_isa300(ctx)) {
                tcg_gen_mov_tl(cpu_ca32, cpu_ca);
            }
        } else if (add_ca) {
            /* Full-width: two carrying adds, arg2 + CA then + ~arg1. */
            TCGv zero, inv1 = tcg_temp_new();
            tcg_gen_not_tl(inv1, arg1);
            zero = tcg_const_tl(0);
            tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
            tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
            gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0);
            tcg_temp_free(zero);
            tcg_temp_free(inv1);
        } else {
            /* Plain subtract: CA is simply "no borrow", arg2 >= arg1. */
            tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
            tcg_gen_sub_tl(t0, arg2, arg1);
            gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1);
        }
    } else if (add_ca) {
        /*
         * Since we're ignoring carry-out, we can simplify the
         * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1.
         */
        tcg_gen_sub_tl(t0, arg2, arg1);
        tcg_gen_add_tl(t0, t0, cpu_ca);
        tcg_gen_subi_tl(t0, t0, 1);
    } else {
        tcg_gen_sub_tl(t0, arg2, arg1);
    }

    if (compute_ov) {
        gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
    }
    if (unlikely(compute_rc0)) {
        gen_set_Rc0(ctx, t0);
    }

    /* Copy the scratch result back if we diverted it above. */
    if (t0 != ret) {
        tcg_gen_mov_tl(ret, t0);
        tcg_temp_free(t0);
    }
}
/* Sub functions with Two operands functions */
/*
 * GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov):
 * rD = rB - rA via gen_op_arith_subf(); see that function for the
 * meaning of the flag parameters.
 */
#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],     \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
}
/* Sub functions with one operand and one immediate */
/*
 * GEN_INT_ARITH_SUBF_CONST: as above but with a constant in place of rB
 * (used by subfme with -1 and subfze with 0).
 */
#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
                                add_ca, compute_ca, compute_ov)               \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    TCGv t0 = tcg_const_tl(const_val);                                        \
    gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)],                          \
                      cpu_gpr[rA(ctx->opcode)], t0,                           \
                      add_ca, compute_ca, compute_ov, Rc(ctx->opcode));       \
    tcg_temp_free(t0);                                                        \
}
/* subf  subf.  subfo  subfo. */
GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
/* subfc  subfc.  subfco  subfco. */
GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
/* subfe  subfe.  subfeo  subfeo. */
GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
/* subfme  subfme.  subfmeo  subfmeo.  */
GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
/* subfze  subfze.  subfzeo  subfzeo.*/
GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
2196 
2197 /* subfic */
2198 static void gen_subfic(DisasContext *ctx)
2199 {
2200     TCGv c = tcg_const_tl(SIMM(ctx->opcode));
2201     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2202                       c, 0, 1, 0, 0);
2203     tcg_temp_free(c);
2204 }
2205 
2206 /* neg neg. nego nego. */
2207 static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
2208 {
2209     TCGv zero = tcg_const_tl(0);
2210     gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
2211                       zero, 0, 0, compute_ov, Rc(ctx->opcode));
2212     tcg_temp_free(zero);
2213 }
2214 
2215 static void gen_neg(DisasContext *ctx)
2216 {
2217     tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
2218     if (unlikely(Rc(ctx->opcode))) {
2219         gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
2220     }
2221 }
2222 
static void gen_nego(DisasContext *ctx)
{
    /* nego[.]: negate with OV/SO update, via the subf-based helper. */
    gen_op_arith_neg(ctx, 1);
}
2227 
2228 /***                            Integer logical                            ***/
2229 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
2230 static void glue(gen_, name)(DisasContext *ctx)                               \
2231 {                                                                             \
2232     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],                \
2233        cpu_gpr[rB(ctx->opcode)]);                                             \
2234     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2235         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2236 }
2237 
2238 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
2239 static void glue(gen_, name)(DisasContext *ctx)                               \
2240 {                                                                             \
2241     tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);               \
2242     if (unlikely(Rc(ctx->opcode) != 0))                                       \
2243         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);                           \
2244 }
2245 
2246 /* and & and. */
2247 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
2248 /* andc & andc. */
2249 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
2250 
2251 /* andi. */
2252 static void gen_andi_(DisasContext *ctx)
2253 {
2254     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2255                     UIMM(ctx->opcode));
2256     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2257 }
2258 
2259 /* andis. */
2260 static void gen_andis_(DisasContext *ctx)
2261 {
2262     tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2263                     UIMM(ctx->opcode) << 16);
2264     gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2265 }
2266 
2267 /* cntlzw */
2268 static void gen_cntlzw(DisasContext *ctx)
2269 {
2270     TCGv_i32 t = tcg_temp_new_i32();
2271 
2272     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2273     tcg_gen_clzi_i32(t, t, 32);
2274     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2275     tcg_temp_free_i32(t);
2276 
2277     if (unlikely(Rc(ctx->opcode) != 0)) {
2278         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2279     }
2280 }
2281 
2282 /* cnttzw */
2283 static void gen_cnttzw(DisasContext *ctx)
2284 {
2285     TCGv_i32 t = tcg_temp_new_i32();
2286 
2287     tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]);
2288     tcg_gen_ctzi_i32(t, t, 32);
2289     tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t);
2290     tcg_temp_free_i32(t);
2291 
2292     if (unlikely(Rc(ctx->opcode) != 0)) {
2293         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2294     }
2295 }
2296 
/* eqv & eqv. : rA = ~(rS ^ rB) */
GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
/* extsb & extsb. : rA = sign-extended low byte of rS */
GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
/* extsh & extsh. : rA = sign-extended low halfword of rS */
GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
/* nand & nand. : rA = ~(rS & rB) */
GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
/* nor & nor. : rA = ~(rS | rB) */
GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
2307 
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
static void gen_pause(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_const_i32(0);
    /*
     * cpu_env points at PowerPCCPU.env, so stepping back by
     * offsetof(PowerPCCPU, env) reaches the enclosing CPUState, whose
     * 'halted' field is written here.  NOTE(review): the value stored
     * is 0 - confirm against the EXCP_HLT handling that this is the
     * intended interaction.
     */
    tcg_gen_st_i32(t0, cpu_env,
                   -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
    tcg_temp_free_i32(t0);

    /* Stop translation, this gives other CPUs a chance to run */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
2320 
/* or & or. */
/*
 * Besides the plain OR (and the "mr" register-move special case),
 * "or rx,rx,rx" with Rc=0 is overloaded on 64-bit CPUs as a thread
 * priority hint: the register number selects a priority that is written
 * into SPR_PPR bits 52:50.
 */
static void gen_or(DisasContext *ctx)
{
    int rs, ra, rb;

    rs = rS(ctx->opcode);
    ra = rA(ctx->opcode);
    rb = rB(ctx->opcode);
    /* Optimisation for mr. ri case */
    if (rs != ra || rs != rb) {
        if (rs != rb) {
            tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
        } else {
            /* rs == rb: this is "mr ra,rs", a plain register move. */
            tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
        }
        if (unlikely(Rc(ctx->opcode) != 0)) {
            gen_set_Rc0(ctx, cpu_gpr[ra]);
        }
    } else if (unlikely(Rc(ctx->opcode) != 0)) {
        /* or. rx,rx,rx: only the CR0 update is visible. */
        gen_set_Rc0(ctx, cpu_gpr[rs]);
#if defined(TARGET_PPC64)
    } else if (rs != 0) { /* 0 is nop */
        int prio = 0;

        /* Map the hint encoding (register number) to a PPR priority. */
        switch (rs) {
        case 1:
            /* Set process priority to low */
            prio = 2;
            break;
        case 6:
            /* Set process priority to medium-low */
            prio = 3;
            break;
        case 2:
            /* Set process priority to normal */
            prio = 4;
            break;
#if !defined(CONFIG_USER_ONLY)
        case 31:
            if (!ctx->pr) {
                /* Set process priority to very low */
                prio = 1;
            }
            break;
        case 5:
            if (!ctx->pr) {
                /* Set process priority to medium-high */
                prio = 5;
            }
            break;
        case 3:
            if (!ctx->pr) {
                /* Set process priority to high */
                prio = 6;
            }
            break;
        case 7:
            if (ctx->hv && !ctx->pr) {
                /* Set process priority to very high */
                prio = 7;
            }
            break;
#endif
        default:
            break;
        }
        if (prio) {
            TCGv t0 = tcg_temp_new();
            gen_load_spr(t0, SPR_PPR);
            /* Replace PPR bits 52:50 with the new priority. */
            tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
            tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
            gen_store_spr(SPR_PPR, t0);
            tcg_temp_free(t0);
        }
#if !defined(CONFIG_USER_ONLY)
        /*
         * Pause out of TCG otherwise spin loops with smt_low eat too
         * much CPU and the kernel hangs.  This applies to all
         * encodings other than no-op, e.g., miso(rs=26), yield(27),
         * mdoio(29), mdoom(30), and all currently undefined.
         */
        gen_pause(ctx);
#endif
#endif
    }
}
/* orc & orc. : rA = rS | ~rB */
GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
2409 
2410 /* xor & xor. */
2411 static void gen_xor(DisasContext *ctx)
2412 {
2413     /* Optimisation for "set to zero" case */
2414     if (rS(ctx->opcode) != rB(ctx->opcode)) {
2415         tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2416                        cpu_gpr[rB(ctx->opcode)]);
2417     } else {
2418         tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
2419     }
2420     if (unlikely(Rc(ctx->opcode) != 0)) {
2421         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2422     }
2423 }
2424 
2425 /* ori */
2426 static void gen_ori(DisasContext *ctx)
2427 {
2428     target_ulong uimm = UIMM(ctx->opcode);
2429 
2430     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2431         return;
2432     }
2433     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2434 }
2435 
2436 /* oris */
2437 static void gen_oris(DisasContext *ctx)
2438 {
2439     target_ulong uimm = UIMM(ctx->opcode);
2440 
2441     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2442         /* NOP */
2443         return;
2444     }
2445     tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2446                    uimm << 16);
2447 }
2448 
2449 /* xori */
2450 static void gen_xori(DisasContext *ctx)
2451 {
2452     target_ulong uimm = UIMM(ctx->opcode);
2453 
2454     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2455         /* NOP */
2456         return;
2457     }
2458     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
2459 }
2460 
2461 /* xoris */
2462 static void gen_xoris(DisasContext *ctx)
2463 {
2464     target_ulong uimm = UIMM(ctx->opcode);
2465 
2466     if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
2467         /* NOP */
2468         return;
2469     }
2470     tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
2471                     uimm << 16);
2472 }
2473 
/* popcntb : PowerPC 2.03 specification */
static void gen_popcntb(DisasContext *ctx)
{
    /* Per-byte population counts are done in a helper. */
    gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
2479 
static void gen_popcntw(DisasContext *ctx)
{
#if defined(TARGET_PPC64)
    /* Two words per register: the helper handles each word separately. */
    gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#else
    /* One word per register: a plain population count suffices. */
    tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
#endif
}
2488 
#if defined(TARGET_PPC64)
/* popcntd: PowerPC 2.06 specification */
static void gen_popcntd(DisasContext *ctx)
{
    /* Whole-doubleword population count maps directly onto ctpop. */
    tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
#endif
2496 
/* prtyw: PowerPC 2.05 specification */
static void gen_prtyw(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold each word so bit 0 becomes the XOR of bits 0/8/16/24. */
    tcg_gen_shri_tl(t0, rs, 16);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    /* Keep only bit 0 of each word: the per-word byte-LSB parity. */
    tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
    tcg_temp_free(t0);
}
2510 
#if defined(TARGET_PPC64)
/* prtyd: PowerPC 2.05 specification */
static void gen_prtyd(DisasContext *ctx)
{
    TCGv ra = cpu_gpr[rA(ctx->opcode)];
    TCGv rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t0 = tcg_temp_new();
    /* XOR-fold the doubleword so bit 0 is the XOR of all 8 byte LSBs. */
    tcg_gen_shri_tl(t0, rs, 32);
    tcg_gen_xor_tl(ra, rs, t0);
    tcg_gen_shri_tl(t0, ra, 16);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_shri_tl(t0, ra, 8);
    tcg_gen_xor_tl(ra, ra, t0);
    tcg_gen_andi_tl(ra, ra, 1);
    tcg_temp_free(t0);
}
#endif
2528 
#if defined(TARGET_PPC64)
/* bpermd */
static void gen_bpermd(DisasContext *ctx)
{
    /* Bit-permute doubleword: gather logic lives in the helper. */
    gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
}
#endif
2537 
2538 #if defined(TARGET_PPC64)
/* extsw & extsw. : rA = sign-extended low word of rS */
GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
2541 
2542 /* cntlzd */
2543 static void gen_cntlzd(DisasContext *ctx)
2544 {
2545     tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2546     if (unlikely(Rc(ctx->opcode) != 0)) {
2547         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2548     }
2549 }
2550 
2551 /* cnttzd */
2552 static void gen_cnttzd(DisasContext *ctx)
2553 {
2554     tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64);
2555     if (unlikely(Rc(ctx->opcode) != 0)) {
2556         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2557     }
2558 }
2559 
/* darn */
static void gen_darn(DisasContext *ctx)
{
    int l = L(ctx->opcode);

    /* L > 2 is a reserved encoding: return the all-ones error value. */
    if (l > 2) {
        tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
    } else {
        /* Random numbers are nondeterministic state; start I/O icount. */
        gen_icount_io_start(ctx);
        if (l == 0) {
            gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
        } else {
            /* Return 64-bit random for both CRN and RRN */
            gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
        }
    }
}
2577 #endif
2578 
2579 /***                             Integer rotate                            ***/
2580 
/* rlwimi & rlwimi. */
/*
 * Rotate rS's low word left by SH, then insert the bits selected by
 * MASK(MB, ME) into rA, preserving the rest of rA.
 */
static void gen_rlwimi(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode);
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);

    /* Fast path: the rotated field lines up with the mask -> deposit. */
    if (sh == (31 - me) && mb <= me) {
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
        TCGv t1;

#if defined(TARGET_PPC64)
        /* MASK() takes 64-bit bit numbers; the word occupies bits 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);

#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        t1 = tcg_temp_new();
        if (mask_in_32b) {
            /* Mask confined to the low word: a 32-bit rotate suffices. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t0, t_rs);
            tcg_gen_rotli_i32(t0, t0, sh);
            tcg_gen_extu_i32_tl(t1, t0);
            tcg_temp_free_i32(t0);
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the word so a 64-bit rotate matches word rotation. */
            tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t1, t1, sh);
#else
            g_assert_not_reached();
#endif
        }

        /* Merge: masked rotated bits in, everything else kept from rA. */
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2633 
/* rlwinm & rlwinm. */
/*
 * rA = (rS's low word rotated left by SH) & MASK(MB, ME).  Recognises
 * the shift-left and shift-right idioms before falling back to the
 * general rotate-and-mask form.
 */
static void gen_rlwinm(DisasContext *ctx)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int sh = SH(ctx->opcode);
    int mb = MB(ctx->opcode);
    int me = ME(ctx->opcode);
    int len = me - mb + 1;
    int rsh = (32 - sh) & 31;

    if (sh != 0 && len > 0 && me == (31 - sh)) {
        /* slwi-style: field shifted left into place, rest zeroed. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 31 && rsh + len <= 32) {
        /* srwi/extract-style: right-aligned bit-field extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        target_ulong mask;
        bool mask_in_32b = true;
#if defined(TARGET_PPC64)
        /* MASK() takes 64-bit bit numbers; the word occupies bits 32..63. */
        mb += 32;
        me += 32;
#endif
        mask = MASK(mb, me);
#if defined(TARGET_PPC64)
        if (mask > 0xffffffffu) {
            mask_in_32b = false;
        }
#endif
        if (mask_in_32b) {
            if (sh == 0) {
                /* No rotation: just apply the mask. */
                tcg_gen_andi_tl(t_ra, t_rs, mask);
            } else {
                TCGv_i32 t0 = tcg_temp_new_i32();
                tcg_gen_trunc_tl_i32(t0, t_rs);
                tcg_gen_rotli_i32(t0, t0, sh);
                tcg_gen_andi_i32(t0, t0, mask);
                tcg_gen_extu_i32_tl(t_ra, t0);
                tcg_temp_free_i32(t0);
            }
        } else {
#if defined(TARGET_PPC64)
            /* Duplicate the word so a 64-bit rotate matches word rotation. */
            tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
            tcg_gen_rotli_i64(t_ra, t_ra, sh);
            tcg_gen_andi_i64(t_ra, t_ra, mask);
#else
            g_assert_not_reached();
#endif
        }
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, t_ra);
    }
}
2687 
/* rlwnm & rlwnm. */
static void gen_rlwnm(DisasContext *ctx)
{
    /*
     * Rotate Left Word then AND with Mask: rotate the low 32 bits of
     * rS left by the low 5 bits of rB, AND with MASK(MB, ME), put in rA.
     */
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
    uint32_t mb = MB(ctx->opcode);
    uint32_t me = ME(ctx->opcode);
    target_ulong mask;
    bool mask_in_32b = true;

#if defined(TARGET_PPC64)
    /* MASK() takes 64-bit bit numbers; the 32-bit mask lives in the
       low word. */
    mb += 32;
    me += 32;
#endif
    mask = MASK(mb, me);

#if defined(TARGET_PPC64)
    if (mask > 0xffffffffu) {
        /* mb > me gave a wrapping mask that reaches the high word. */
        mask_in_32b = false;
    }
#endif
    if (mask_in_32b) {
        /* Rotate in 32-bit arithmetic, then zero-extend. */
        TCGv_i32 t0 = tcg_temp_new_i32();
        TCGv_i32 t1 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t0, t_rb);
        tcg_gen_trunc_tl_i32(t1, t_rs);
        tcg_gen_andi_i32(t0, t0, 0x1f);
        tcg_gen_rotl_i32(t1, t1, t0);
        tcg_gen_extu_i32_tl(t_ra, t1);
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    } else {
#if defined(TARGET_PPC64)
        /* Double the low word so a 64-bit rotate reproduces the
           32-bit rotate semantics for the wrapping mask. */
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, t_rb, 0x1f);
        tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
        tcg_gen_rotl_i64(t_ra, t_ra, t0);
        tcg_temp_free_i64(t0);
#else
        g_assert_not_reached();
#endif
    }

    tcg_gen_andi_tl(t_ra, t_ra, mask);

    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: update CR0 from the result. */
        gen_set_Rc0(ctx, t_ra);
    }
}
2738 
2739 #if defined(TARGET_PPC64)
/*
 * Expand to two opcode-table entry points gen_<name>0/gen_<name>1,
 * forwarding one extra opcode bit to gen_<name>().
 */
#define GEN_PPC64_R2(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0);                                                       \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1);                                                       \
}
/*
 * Expand to four entry points gen_<name>0..gen_<name>3, forwarding
 * two extra opcode bits to gen_<name>().
 */
#define GEN_PPC64_R4(name, opc1, opc2)                                        \
static void glue(gen_, name##0)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##1)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 0, 1);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##2)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 0);                                                    \
}                                                                             \
                                                                              \
static void glue(gen_, name##3)(DisasContext *ctx)                            \
{                                                                             \
    gen_##name(ctx, 1, 1);                                                    \
}
2770 
/*
 * Common code for rldicl/rldicr/rldic: rotate rS left by sh, AND with
 * MASK(mb, me), put in rA, using deposit/extract fast paths when the
 * mask shape allows.
 */
static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
{
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    int len = me - mb + 1;          /* mask length; <= 0 if mb > me */
    int rsh = (64 - sh) & 63;       /* equivalent right-rotate amount */

    if (sh != 0 && len > 0 && me == (63 - sh)) {
        /* Mask selects exactly the bits shifted in: shift + zero-deposit. */
        tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len);
    } else if (me == 63 && rsh + len <= 64) {
        /* Right-justified mask: a plain bitfield extract. */
        tcg_gen_extract_tl(t_ra, t_rs, rsh, len);
    } else {
        tcg_gen_rotli_tl(t_ra, t_rs, sh);
        tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: update CR0 from the result. */
        gen_set_Rc0(ctx, t_ra);
    }
}
2790 
2791 /* rldicl - rldicl. */
2792 static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
2793 {
2794     uint32_t sh, mb;
2795 
2796     sh = SH(ctx->opcode) | (shn << 5);
2797     mb = MB(ctx->opcode) | (mbn << 5);
2798     gen_rldinm(ctx, mb, 63, sh);
2799 }
2800 GEN_PPC64_R4(rldicl, 0x1E, 0x00);
2801 
/* rldicr - rldicr. */
static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
{
    /* Rotate Left Doubleword Immediate then Clear Right. */
    uint32_t sh, me;

    sh = SH(ctx->opcode) | (shn << 5);
    /* rldicr's me field occupies the same opcode bits as mb in the
       other forms, so MB() is the correct extractor here. */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldinm(ctx, 0, me, sh);
}
GEN_PPC64_R4(rldicr, 0x1E, 0x02);
2812 
2813 /* rldic - rldic. */
2814 static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
2815 {
2816     uint32_t sh, mb;
2817 
2818     sh = SH(ctx->opcode) | (shn << 5);
2819     mb = MB(ctx->opcode) | (mbn << 5);
2820     gen_rldinm(ctx, mb, 63 - sh, sh);
2821 }
2822 GEN_PPC64_R4(rldic, 0x1E, 0x04);
2823 
2824 static void gen_rldnm(DisasContext *ctx, int mb, int me)
2825 {
2826     TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
2827     TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
2828     TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
2829     TCGv t0;
2830 
2831     t0 = tcg_temp_new();
2832     tcg_gen_andi_tl(t0, t_rb, 0x3f);
2833     tcg_gen_rotl_tl(t_ra, t_rs, t0);
2834     tcg_temp_free(t0);
2835 
2836     tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
2837     if (unlikely(Rc(ctx->opcode) != 0)) {
2838         gen_set_Rc0(ctx, t_ra);
2839     }
2840 }
2841 
2842 /* rldcl - rldcl. */
2843 static inline void gen_rldcl(DisasContext *ctx, int mbn)
2844 {
2845     uint32_t mb;
2846 
2847     mb = MB(ctx->opcode) | (mbn << 5);
2848     gen_rldnm(ctx, mb, 63);
2849 }
2850 GEN_PPC64_R2(rldcl, 0x1E, 0x08);
2851 
/* rldcr - rldcr. */
static inline void gen_rldcr(DisasContext *ctx, int men)
{
    /* Rotate Left Doubleword then Clear Right. */
    uint32_t me;

    /* rldcr's me field occupies the same opcode bits as mb in the
       other forms, so MB() is the correct extractor here. */
    me = MB(ctx->opcode) | (men << 5);
    gen_rldnm(ctx, 0, me);
}
GEN_PPC64_R2(rldcr, 0x1E, 0x09);
2861 
/* rldimi - rldimi. */
static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
{
    /*
     * Rotate Left Doubleword Immediate then Mask Insert: rotate rS
     * left by sh and insert the bits selected by MASK(mb, 63 - sh)
     * into rA, leaving rA's other bits untouched.
     */
    TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
    TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
    uint32_t sh = SH(ctx->opcode) | (shn << 5);
    uint32_t mb = MB(ctx->opcode) | (mbn << 5);
    uint32_t me = 63 - sh;

    if (mb <= me) {
        /* Non-wrapping mask: a single deposit performs the insert. */
        tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
    } else {
        /* Wrapping mask: rotate, mask, and merge by hand. */
        target_ulong mask = MASK(mb, me);
        TCGv t1 = tcg_temp_new();

        tcg_gen_rotli_tl(t1, t_rs, sh);
        tcg_gen_andi_tl(t1, t1, mask);
        tcg_gen_andi_tl(t_ra, t_ra, ~mask);
        tcg_gen_or_tl(t_ra, t_ra, t1);
        tcg_temp_free(t1);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        /* Rc=1: update CR0 from the result. */
        gen_set_Rc0(ctx, t_ra);
    }
}
GEN_PPC64_R4(rldimi, 0x1E, 0x06);
2888 #endif
2889 
2890 /***                             Integer shift                             ***/
2891 
/* slw & slw. */
static void gen_slw(DisasContext *ctx)
{
    /*
     * Shift Left Word.  The architected result is 0 when the shift
     * amount in rB is >= 0x20; implement that by zeroing rS first
     * via a mask derived from rB's 0x20 bit.
     */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB's 0x20 bit to the sign position, then replicate it
       across the register with an arithmetic right shift. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    /* The result is a 32-bit quantity; clear the high word. */
    tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2917 
2918 /* sraw & sraw. */
2919 static void gen_sraw(DisasContext *ctx)
2920 {
2921     gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
2922                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
2923     if (unlikely(Rc(ctx->opcode) != 0)) {
2924         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
2925     }
2926 }
2927 
/* srawi & srawi. */
static void gen_srawi(DisasContext *ctx)
{
    /*
     * Shift Right Algebraic Word Immediate.  CA is set iff the
     * (sign-extended) source is negative and 1-bits were shifted out.
     */
    int sh = SH(ctx->opcode);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Zero shift: just sign-extend; CA is always 0. */
        tcg_gen_ext32s_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        tcg_gen_ext32s_tl(dst, src);
        /* Gather the bits about to be shifted out... */
        tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the source is negative. */
        tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            /* ISA v3.00: CA32 mirrors CA for 32-bit shifts. */
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, dst, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}
2958 
/* srw & srw. */
static void gen_srw(DisasContext *ctx)
{
    /*
     * Shift Right Word.  The architected result is 0 when the shift
     * amount in rB is >= 0x20; implement that by zeroing rS first
     * via a mask derived from rB's 0x20 bit.
     */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x20 */
#if defined(TARGET_PPC64)
    /* Move rB's 0x20 bit to the sign position, then replicate it
       across the register with an arithmetic right shift. */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
    tcg_gen_sari_tl(t0, t0, 0x3f);
#else
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
    tcg_gen_sari_tl(t0, t0, 0x1f);
#endif
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    /* Logical shift: clear the high word before shifting. */
    tcg_gen_ext32u_tl(t0, t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
2984 
2985 #if defined(TARGET_PPC64)
2986 /* sld & sld. */
2987 static void gen_sld(DisasContext *ctx)
2988 {
2989     TCGv t0, t1;
2990 
2991     t0 = tcg_temp_new();
2992     /* AND rS with a mask that is 0 when rB >= 0x40 */
2993     tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
2994     tcg_gen_sari_tl(t0, t0, 0x3f);
2995     tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
2996     t1 = tcg_temp_new();
2997     tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
2998     tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
2999     tcg_temp_free(t1);
3000     tcg_temp_free(t0);
3001     if (unlikely(Rc(ctx->opcode) != 0)) {
3002         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3003     }
3004 }
3005 
3006 /* srad & srad. */
3007 static void gen_srad(DisasContext *ctx)
3008 {
3009     gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
3010                     cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3011     if (unlikely(Rc(ctx->opcode) != 0)) {
3012         gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
3013     }
3014 }
/* sradi & sradi. */
static inline void gen_sradi(DisasContext *ctx, int n)
{
    /*
     * Shift Right Algebraic Doubleword Immediate; n supplies the
     * high bit of the 6-bit shift count.  CA is set iff the source
     * is negative and 1-bits were shifted out.
     */
    int sh = SH(ctx->opcode) + (n << 5);
    TCGv dst = cpu_gpr[rA(ctx->opcode)];
    TCGv src = cpu_gpr[rS(ctx->opcode)];
    if (sh == 0) {
        /* Zero shift: plain move; CA is always 0. */
        tcg_gen_mov_tl(dst, src);
        tcg_gen_movi_tl(cpu_ca, 0);
        if (is_isa300(ctx)) {
            tcg_gen_movi_tl(cpu_ca32, 0);
        }
    } else {
        TCGv t0;
        /* Gather the bits about to be shifted out... */
        tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
        t0 = tcg_temp_new();
        /* ...and keep them only when the source is negative. */
        tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
        tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
        tcg_temp_free(t0);
        tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
        if (is_isa300(ctx)) {
            /* ISA v3.00: CA32 mirrors CA. */
            tcg_gen_mov_tl(cpu_ca32, cpu_ca);
        }
        tcg_gen_sari_tl(dst, src, sh);
    }
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, dst);
    }
}

/* sradi with the shift-count high bit clear. */
static void gen_sradi0(DisasContext *ctx)
{
    gen_sradi(ctx, 0);
}

/* sradi with the shift-count high bit set. */
static void gen_sradi1(DisasContext *ctx)
{
    gen_sradi(ctx, 1);
}
3054 
3055 /* extswsli & extswsli. */
3056 static inline void gen_extswsli(DisasContext *ctx, int n)
3057 {
3058     int sh = SH(ctx->opcode) + (n << 5);
3059     TCGv dst = cpu_gpr[rA(ctx->opcode)];
3060     TCGv src = cpu_gpr[rS(ctx->opcode)];
3061 
3062     tcg_gen_ext32s_tl(dst, src);
3063     tcg_gen_shli_tl(dst, dst, sh);
3064     if (unlikely(Rc(ctx->opcode) != 0)) {
3065         gen_set_Rc0(ctx, dst);
3066     }
3067 }
3068 
3069 static void gen_extswsli0(DisasContext *ctx)
3070 {
3071     gen_extswsli(ctx, 0);
3072 }
3073 
3074 static void gen_extswsli1(DisasContext *ctx)
3075 {
3076     gen_extswsli(ctx, 1);
3077 }
3078 
/* srd & srd. */
static void gen_srd(DisasContext *ctx)
{
    /*
     * Shift Right Doubleword.  The architected result is 0 when the
     * shift amount in rB is >= 0x40; implement that by zeroing rS
     * first via a mask derived from rB's 0x40 bit.
     */
    TCGv t0, t1;

    t0 = tcg_temp_new();
    /* AND rS with a mask that is 0 when rB >= 0x40 */
    tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
    tcg_gen_sari_tl(t0, t0, 0x3f);
    tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
    tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
    tcg_temp_free(t1);
    tcg_temp_free(t0);
    if (unlikely(Rc(ctx->opcode) != 0)) {
        gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
    }
}
3098 #endif
3099 
3100 /***                           Addressing modes                            ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
                                      target_long maskl)
{
    target_long simm = SIMM(ctx->opcode);

    /* maskl clears low displacement bits (e.g. the reserved low bits
       of DS-form offsets). */
    simm &= ~maskl;
    if (rA(ctx->opcode) == 0) {
        if (NARROW_MODE(ctx)) {
            /* 32-bit mode: effective addresses wrap at 4GB. */
            simm = (uint32_t)simm;
        }
        tcg_gen_movi_tl(EA, simm);
    } else if (likely(simm != 0)) {
        tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, EA);
        }
    } else {
        /* Zero displacement: EA is (possibly truncated) rA. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        } else {
            tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
        }
    }
}
3126 
3127 static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
3128 {
3129     if (rA(ctx->opcode) == 0) {
3130         if (NARROW_MODE(ctx)) {
3131             tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3132         } else {
3133             tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
3134         }
3135     } else {
3136         tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
3137         if (NARROW_MODE(ctx)) {
3138             tcg_gen_ext32u_tl(EA, EA);
3139         }
3140     }
3141 }
3142 
3143 static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
3144 {
3145     if (rA(ctx->opcode) == 0) {
3146         tcg_gen_movi_tl(EA, 0);
3147     } else if (NARROW_MODE(ctx)) {
3148         tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3149     } else {
3150         tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
3151     }
3152 }
3153 
3154 static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
3155                                 target_long val)
3156 {
3157     tcg_gen_addi_tl(ret, arg1, val);
3158     if (NARROW_MODE(ctx)) {
3159         tcg_gen_ext32u_tl(ret, ret);
3160     }
3161 }
3162 
3163 static inline void gen_align_no_le(DisasContext *ctx)
3164 {
3165     gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
3166                       (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
3167 }
3168 
3169 static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ)
3170 {
3171     TCGv ea = tcg_temp_new();
3172     if (ra) {
3173         tcg_gen_add_tl(ea, cpu_gpr[ra], displ);
3174     } else {
3175         tcg_gen_mov_tl(ea, displ);
3176     }
3177     if (NARROW_MODE(ctx)) {
3178         tcg_gen_ext32u_tl(ea, ea);
3179     }
3180     return ea;
3181 }
3182 
/***                             Integer load                              ***/
/* Memop carrying the guest's default endianness. */
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
/* Memop carrying the opposite of the guest's default endianness. */
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))

/* Define gen_qemu_<ldop>(ctx, val, addr): a target_long-sized load. */
#define GEN_QEMU_LOAD_TL(ldop, op)                                      \
static void glue(gen_qemu_, ldop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op);                    \
}

GEN_QEMU_LOAD_TL(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))

/* Byte-reversed (opposite-endian) loads. */
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))

/* Define gen_qemu_<ldop>_i64(ctx, val, addr): a 64-bit load. */
#define GEN_QEMU_LOAD_64(ldop, op)                                  \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx,    \
                                             TCGv_i64 val,          \
                                             TCGv addr)             \
{                                                                   \
    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op);               \
}

GEN_QEMU_LOAD_64(ld8u,  DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64,  DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
3221 
/* Define gen_qemu_<stop>(ctx, val, addr): a target_long-sized store. */
#define GEN_QEMU_STORE_TL(stop, op)                                     \
static void glue(gen_qemu_, stop)(DisasContext *ctx,                    \
                                  TCGv val,                             \
                                  TCGv addr)                            \
{                                                                       \
    tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op);                    \
}

#if defined(TARGET_PPC64) || !defined(CONFIG_USER_ONLY)
GEN_QEMU_STORE_TL(st8,  DEF_MEMOP(MO_UB))
#endif
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))

/* Byte-reversed (opposite-endian) stores. */
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))

/* Define gen_qemu_<stop>_i64(ctx, val, addr): a 64-bit store. */
#define GEN_QEMU_STORE_64(stop, op)                               \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx,  \
                                              TCGv_i64 val,       \
                                              TCGv addr)          \
{                                                                 \
    tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op);             \
}

GEN_QEMU_STORE_64(st8,  DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))

#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
3255 
/*
 * Define gen_<name>x(): an indexed load into rD, gated by the
 * privilege check 'chk'.
 */
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}

/* Indexed load with no privilege check. */
#define GEN_LDX(name, ldop, opc2, opc3, type)                                 \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Indexed load gated by the hypervisor-real-mode check. */
#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type)                            \
    GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Define gen_<name>epx(): a supervisor-checked load using the
 * dedicated PPC_TLB_EPID_LOAD mmu index instead of ctx->mem_idx.
 */
#define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_LOAD, ldop);\
    tcg_temp_free(EA);                                                        \
}

GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif

#if defined(TARGET_PPC64)
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
#endif
3300 
/***                              Integer store                            ***/
/*
 * Define gen_<name>x(): an indexed store from rS, gated by the
 * privilege check 'chk'.
 */
#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
static void glue(gen_, name##x)(DisasContext *ctx)                            \
{                                                                             \
    TCGv EA;                                                                  \
    chk(ctx);                                                                 \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA);                       \
    tcg_temp_free(EA);                                                        \
}
/* Indexed store with no privilege check. */
#define GEN_STX(name, stop, opc2, opc3, type)                                 \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)

/* Indexed store gated by the hypervisor-real-mode check. */
#define GEN_STX_HVRM(name, stop, opc2, opc3, type)                            \
    GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)

/*
 * Define gen_<name>epx(): a supervisor-checked store using the
 * dedicated PPC_TLB_EPID_STORE mmu index.
 * NOTE(review): rD() is used to name the store source here while
 * GEN_STX_E uses rS(); presumably both extract the same opcode
 * field — confirm against the rD/rS definitions.
 */
#define GEN_STEPX(name, stop, opc2, opc3)                                     \
static void glue(gen_, name##epx)(DisasContext *ctx)                          \
{                                                                             \
    TCGv EA;                                                                  \
    CHK_SV(ctx);                                                              \
    gen_set_access_type(ctx, ACCESS_INT);                                     \
    EA = tcg_temp_new();                                                      \
    gen_addr_reg_index(ctx, EA);                                              \
    tcg_gen_qemu_st_tl(                                                       \
        cpu_gpr[rD(ctx->opcode)], EA, PPC_TLB_EPID_STORE, stop);              \
    tcg_temp_free(EA);                                                        \
}

GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif

#if defined(TARGET_PPC64)
/* CI (cache-inhibited) store variants, hypervisor-real-mode only. */
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
#endif
/***                Integer load and store with byte reverse               ***/

/* lhbrx */
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);

/* lwbrx */
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);

#if defined(TARGET_PPC64)
/* ldbrx */
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif  /* TARGET_PPC64 */

/* sthbrx */
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
3364 
3365 /***                    Integer load and store multiple                    ***/
3366 
3367 /* lmw */
3368 static void gen_lmw(DisasContext *ctx)
3369 {
3370     TCGv t0;
3371     TCGv_i32 t1;
3372 
3373     if (ctx->le_mode) {
3374         gen_align_no_le(ctx);
3375         return;
3376     }
3377     gen_set_access_type(ctx, ACCESS_INT);
3378     t0 = tcg_temp_new();
3379     t1 = tcg_const_i32(rD(ctx->opcode));
3380     gen_addr_imm_index(ctx, t0, 0);
3381     gen_helper_lmw(cpu_env, t0, t1);
3382     tcg_temp_free(t0);
3383     tcg_temp_free_i32(t1);
3384 }
3385 
3386 /* stmw */
3387 static void gen_stmw(DisasContext *ctx)
3388 {
3389     TCGv t0;
3390     TCGv_i32 t1;
3391 
3392     if (ctx->le_mode) {
3393         gen_align_no_le(ctx);
3394         return;
3395     }
3396     gen_set_access_type(ctx, ACCESS_INT);
3397     t0 = tcg_temp_new();
3398     t1 = tcg_const_i32(rS(ctx->opcode));
3399     gen_addr_imm_index(ctx, t0, 0);
3400     gen_helper_stmw(cpu_env, t0, t1);
3401     tcg_temp_free(t0);
3402     tcg_temp_free_i32(t1);
3403 }
3404 
3405 /***                    Integer load and store strings                     ***/
3406 
/* lswi */
/*
 * PowerPC32 specification says we must generate an exception if rA is
 * in the range of registers to be loaded.  On the other hand, IBM says
 * this is valid, but rA won't be loaded.  For now, I'll follow the
 * spec...
 */
static void gen_lswi(DisasContext *ctx)
{
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);       /* byte count; 0 encodes 32 */
    int start = rD(ctx->opcode);    /* first register loaded */
    int ra = rA(ctx->opcode);
    int nr;                         /* number of registers written */

    if (ctx->le_mode) {
        /* String ops take an alignment interrupt in LE mode. */
        gen_align_no_le(ctx);
        return;
    }
    if (nb == 0) {
        nb = 32;
    }
    nr = DIV_ROUND_UP(nb, 4);
    /* Reject the case where rA falls inside the loaded range. */
    if (unlikely(lsw_reg_in_range(start, nr, ra))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(start);
    gen_helper_lsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3445 
3446 /* lswx */
3447 static void gen_lswx(DisasContext *ctx)
3448 {
3449     TCGv t0;
3450     TCGv_i32 t1, t2, t3;
3451 
3452     if (ctx->le_mode) {
3453         gen_align_no_le(ctx);
3454         return;
3455     }
3456     gen_set_access_type(ctx, ACCESS_INT);
3457     t0 = tcg_temp_new();
3458     gen_addr_reg_index(ctx, t0);
3459     t1 = tcg_const_i32(rD(ctx->opcode));
3460     t2 = tcg_const_i32(rA(ctx->opcode));
3461     t3 = tcg_const_i32(rB(ctx->opcode));
3462     gen_helper_lswx(cpu_env, t0, t1, t2, t3);
3463     tcg_temp_free(t0);
3464     tcg_temp_free_i32(t1);
3465     tcg_temp_free_i32(t2);
3466     tcg_temp_free_i32(t3);
3467 }
3468 
/* stswi */
static void gen_stswi(DisasContext *ctx)
{
    /* Store String Word Immediate: store nb bytes (0 encodes 32)
       starting from register rS; big-endian mode only. */
    TCGv t0;
    TCGv_i32 t1, t2;
    int nb = NB(ctx->opcode);

    if (ctx->le_mode) {
        /* String ops take an alignment interrupt in LE mode. */
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_register(ctx, t0);
    if (nb == 0) {
        nb = 32;
    }
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3493 
/* stswx */
static void gen_stswx(DisasContext *ctx)
{
    /* Store String Word Indexed: the byte count is taken from the
       low 7 bits of XER. */
    TCGv t0;
    TCGv_i32 t1, t2;

    if (ctx->le_mode) {
        /* String ops take an alignment interrupt in LE mode. */
        gen_align_no_le(ctx);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_xer);
    /* Byte count: XER[25:31]. */
    tcg_gen_andi_i32(t1, t1, 0x7F);
    t2 = tcg_const_i32(rS(ctx->opcode));
    gen_helper_stsw(cpu_env, t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}
3516 
3517 /***                        Memory synchronisation                         ***/
/* eieio */
static void gen_eieio(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;

    /*
     * eieio has complex semantics. It provides memory ordering between
     * operations in the set:
     * - loads from CI memory.
     * - stores to CI memory.
     * - stores to WT memory.
     *
     * It separately also orders memory for operations in the set:
     * - stores to cacheable memory.
     *
     * It also serializes instructions:
     * - dcbt and dcbst.
     *
     * It separately serializes:
     * - tlbie and tlbsync.
     *
     * And separately serializes:
     * - slbieg, slbiag, and slbsync.
     *
     * The end result is that CI memory ordering requires TCG_MO_ALL
     * and it is not possible to special-case more relaxed ordering for
     * cacheable accesses. TCG_BAR_SC is required to provide this
     * serialization.
     */

    /*
     * POWER9 has an eieio instruction variant using bit 6 as a hint to
     * tell the CPU it is a store-forwarding barrier.
     */
    if (ctx->opcode & 0x2000000) {
        /*
         * ISA says that "Reserved fields in instructions are ignored
         * by the processor". So ignore the bit 6 on non-POWER9 CPU but
         * as this is not an instruction software should be using,
         * complain to the user.
         */
        if (!(ctx->insns_flags2 & PPC2_ISA300)) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @"
                          TARGET_FMT_lx "\n", ctx->cia);
        } else {
            /* Store-forwarding barrier: only order stores vs loads. */
            bar = TCG_MO_ST_LD;
        }
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
3569 
#if !defined(CONFIG_USER_ONLY)
/*
 * Emit code that invokes the TLB-flush helper when env->tlb_need_flush
 * is non-zero.  Used by isync/sync/ptesync to implement lazy TLB
 * invalidation; 'global' selects the broadcast (tlbie-style) flush.
 */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
    TCGv_i32 t;
    TCGLabel *l;

    if (!ctx->lazy_tlb_flush) {
        return;
    }
    l = gen_new_label();
    t = tcg_temp_new_i32();
    tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
    /* Fast path: skip the helper call when no flush is pending. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
    if (global) {
        gen_helper_check_tlb_flush_global(cpu_env);
    } else {
        gen_helper_check_tlb_flush_local(cpu_env);
    }
    gen_set_label(l);
    tcg_temp_free_i32(t);
}
#else
/* User mode has no softmmu TLB to flush. */
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
3594 
/* isync - instruction synchronize; also acts as a full barrier here */
static void gen_isync(DisasContext *ctx)
{
    /*
     * We need to check for a pending TLB flush. This can only happen in
     * kernel mode however so check MSR_PR
     */
    if (!ctx->pr) {
        gen_check_tlb_flush(ctx, false);
    }
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
    /* Force an exit so any newly-modified code is re-translated. */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}
3608 
/* Size in bytes of the access described by a MemOp. */
#define MEMOP_GET_SIZE(x)  (1 << ((x) & MO_SIZE))
3610 
3611 static void gen_load_locked(DisasContext *ctx, MemOp memop)
3612 {
3613     TCGv gpr = cpu_gpr[rD(ctx->opcode)];
3614     TCGv t0 = tcg_temp_new();
3615 
3616     gen_set_access_type(ctx, ACCESS_RES);
3617     gen_addr_reg_index(ctx, t0);
3618     tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN);
3619     tcg_gen_mov_tl(cpu_reserve, t0);
3620     tcg_gen_mov_tl(cpu_reserve_val, gpr);
3621     tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
3622     tcg_temp_free(t0);
3623 }
3624 
/*
 * Emit a gen_<name>() wrapper for a load-and-reserve instruction;
 * all variants share gen_load_locked() and differ only in the MemOp.
 */
#define LARX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_load_locked(ctx, memop);           \
}

/* lbarx, lharx, lwarx */
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
3635 
3636 static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop,
3637                                       TCGv EA, TCGCond cond, int addend)
3638 {
3639     TCGv t = tcg_temp_new();
3640     TCGv t2 = tcg_temp_new();
3641     TCGv u = tcg_temp_new();
3642 
3643     tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
3644     tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop));
3645     tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop);
3646     tcg_gen_addi_tl(u, t, addend);
3647 
3648     /* E.g. for fetch and increment bounded... */
3649     /* mem(EA,s) = (t != t2 ? u = t + 1 : t) */
3650     tcg_gen_movcond_tl(cond, u, t, t2, u, t);
3651     tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop);
3652 
3653     /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */
3654     tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1));
3655     tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u);
3656 
3657     tcg_temp_free(t);
3658     tcg_temp_free(t2);
3659     tcg_temp_free(u);
3660 }
3661 
/*
 * Common code for lwat/ldat, the atomic-load class of instructions.
 * The FC (function code) field selects the operation; RT receives the
 * previous memory value, RT+1 supplies the source operand, and RT+2
 * supplies the replacement value for compare-and-swap-not-equal.
 * FC values with no simple TCG atomic equivalent fall back to a
 * serialized (exclusive-lock) restart when running in parallel.
 */
static void gen_ld_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    int rt = rD(ctx->opcode);
    bool need_serial;
    TCGv src, dst;

    gen_addr_register(ctx, EA);
    dst = cpu_gpr[rt];
    src = cpu_gpr[(rt + 1) & 31];

    need_serial = false;
    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* Fetch and add */
        tcg_gen_atomic_fetch_add_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* Fetch and xor */
        tcg_gen_atomic_fetch_xor_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Fetch and or */
        tcg_gen_atomic_fetch_or_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* Fetch and 'and' */
        tcg_gen_atomic_fetch_and_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Fetch and max unsigned */
        tcg_gen_atomic_fetch_umax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Fetch and max signed */
        tcg_gen_atomic_fetch_smax_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Fetch and min unsigned */
        tcg_gen_atomic_fetch_umin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Fetch and min signed */
        tcg_gen_atomic_fetch_smin_tl(dst, EA, src, ctx->mem_idx, memop);
        break;
    case 8: /* Swap */
        tcg_gen_atomic_xchg_tl(dst, EA, src, ctx->mem_idx, memop);
        break;

    case 16: /* Compare and swap not equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();

            tcg_gen_qemu_ld_tl(t0, EA, ctx->mem_idx, memop);
            /* For 32-bit ops on ppc64, compare only the low word of RT+1. */
            if ((memop & MO_SIZE) == MO_64 || TARGET_LONG_BITS == 32) {
                tcg_gen_mov_tl(t1, src);
            } else {
                tcg_gen_ext32u_tl(t1, src);
            }
            /* Store RT+2 when old != RT+1, else store back the old value. */
            tcg_gen_movcond_tl(TCG_COND_NE, t1, t0, t1,
                               cpu_gpr[(rt + 2) & 31], t0);
            tcg_gen_qemu_st_tl(t1, EA, ctx->mem_idx, memop);
            tcg_gen_mov_tl(dst, t0);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        break;

    case 24: /* Fetch and increment bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, 1);
        }
        break;
    case 25: /* Fetch and increment equal */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_EQ, 1);
        }
        break;
    case 28: /* Fetch and decrement bounded */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            need_serial = true;
        } else {
            gen_fetch_inc_conditional(ctx, memop, EA, TCG_COND_NE, -1);
        }
        break;

    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
    tcg_temp_free(EA);

    if (need_serial) {
        /* Restart with exclusive lock.  */
        gen_helper_exit_atomic(cpu_env);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
}
3762 
/* lwat - atomic load word (ISA 3.0) */
static void gen_lwat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* ldat - atomic load doubleword (ISA 3.0) */
static void gen_ldat(DisasContext *ctx)
{
    gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3774 
/*
 * Common code for stwat/stdat, the atomic-store class of instructions.
 * The FC field selects the operation; RS supplies the operand and the
 * fetched value is discarded (only memory is updated).
 */
static void gen_st_atomic(DisasContext *ctx, MemOp memop)
{
    uint32_t gpr_FC = FC(ctx->opcode);
    TCGv EA = tcg_temp_new();
    TCGv src, discard;

    gen_addr_register(ctx, EA);
    src = cpu_gpr[rD(ctx->opcode)];
    /* The atomic TCG ops always produce a result; we throw it away. */
    discard = tcg_temp_new();

    memop |= MO_ALIGN;
    switch (gpr_FC) {
    case 0: /* add and Store */
        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 1: /* xor and Store */
        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 2: /* Or and Store */
        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 3: /* 'and' and Store */
        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 4:  /* Store max unsigned */
        tcg_gen_atomic_umax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 5:  /* Store max signed */
        tcg_gen_atomic_smax_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 6:  /* Store min unsigned */
        tcg_gen_atomic_umin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 7:  /* Store min signed */
        tcg_gen_atomic_smin_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
        break;
    case 24: /* Store twin  */
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        } else {
            TCGv t = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv s = tcg_temp_new();
            TCGv s2 = tcg_temp_new();
            TCGv ea_plus_s = tcg_temp_new();

            /* Store RS to both locations only if the pair is equal. */
            tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop);
            tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop));
            tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop);
            tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t);
            tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2);
            tcg_gen_qemu_st_tl(s, EA, ctx->mem_idx, memop);
            tcg_gen_qemu_st_tl(s2, ea_plus_s, ctx->mem_idx, memop);

            tcg_temp_free(ea_plus_s);
            tcg_temp_free(s2);
            tcg_temp_free(s);
            tcg_temp_free(t2);
            tcg_temp_free(t);
        }
        break;
    default:
        /* invoke data storage error handler */
        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
    }
    tcg_temp_free(discard);
    tcg_temp_free(EA);
}
3845 
/* stwat - atomic store word (ISA 3.0) */
static void gen_stwat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
}

#ifdef TARGET_PPC64
/* stdat - atomic store doubleword (ISA 3.0) */
static void gen_stdat(DisasContext *ctx)
{
    gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
3857 
/*
 * Common code for stbcx./sthcx./stwcx./stdcx.: store RS only if the
 * reservation from the matching larx is still intact, set CR0 to
 * reflect success (EQ bit) plus the SO bit, and clear the reservation.
 */
static void gen_conditional_store(DisasContext *ctx, MemOp memop)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    TCGv t0 = tcg_temp_new();
    int reg = rS(ctx->opcode);

    gen_set_access_type(ctx, ACCESS_RES);
    gen_addr_reg_index(ctx, t0);
    /* A different effective address always fails the store. */
    tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
    tcg_temp_free(t0);

    t0 = tcg_temp_new();
    /* CAS against the remembered value; success iff memory unchanged. */
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
                              cpu_gpr[reg], ctx->mem_idx,
                              DEF_MEMOP(memop) | MO_ALIGN);
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
    tcg_gen_or_tl(t0, t0, cpu_so);
    tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
    tcg_temp_free(t0);
    tcg_gen_br(l2);

    gen_set_label(l1);

    /*
     * Address mismatch implies failure.  But we still need to provide
     * the memory barrier semantics of the instruction.
     */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

    gen_set_label(l2);
    /* The reservation is always consumed, pass or fail. */
    tcg_gen_movi_tl(cpu_reserve, -1);
}
3893 
/*
 * Emit a gen_<name>() wrapper for a store-conditional instruction;
 * all variants share gen_conditional_store() and differ only in MemOp.
 */
#define STCX(name, memop)                  \
static void gen_##name(DisasContext *ctx)  \
{                                          \
    gen_conditional_store(ctx, memop);     \
}

STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))

#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_UQ))
3909 
/*
 * lqarx - load quadword and reserve.  RD must be even and must not
 * overlap RA/RB.  The 16-byte value is loaded atomically (helper call
 * under CF_PARALLEL when the host has 128-bit atomics, otherwise the
 * TB restarts under the exclusive lock) and both halves are recorded
 * for the matching stqcx.
 */
static void gen_lqarx(DisasContext *ctx)
{
    int rd = rD(ctx->opcode);
    TCGv EA, hi, lo;

    /* ISA: odd RD or RD overlapping RA/RB is an invalid form. */
    if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
                 (rd == rB(ctx->opcode)))) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RD+1, even in LE mode.  */
    lo = cpu_gpr[rd + 1];
    hi = cpu_gpr[rd];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_ATOMIC128) {
            TCGv_i32 oi = tcg_temp_new_i32();
            if (ctx->le_mode) {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
                                                    ctx->mem_idx));
                gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
            } else {
                tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
                                                    ctx->mem_idx));
                gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
            }
            tcg_temp_free_i32(oi);
            /* The helper returns the high half via env->retxh. */
            tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
        } else {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
            tcg_temp_free(EA);
            return;
        }
    } else if (ctx->le_mode) {
        /* Serial context: two 8-byte loads, 16-byte alignment checked. */
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ);
    } else {
        tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16);
        tcg_gen_mov_tl(cpu_reserve, EA);
        gen_addr_add(ctx, EA, EA, 8);
        tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ);
    }
    tcg_temp_free(EA);

    /* Record both halves for the reservation check in stqcx. */
    tcg_gen_st_tl(hi, cpu_env, offsetof(CPUPPCState, reserve_val));
    tcg_gen_st_tl(lo, cpu_env, offsetof(CPUPPCState, reserve_val2));
}
3967 
/*
 * stqcx. - store quadword conditional.  RS must be even.  Under
 * CF_PARALLEL a 128-bit cmpxchg helper is used when available
 * (otherwise the TB restarts under the exclusive lock); in serial
 * context the reservation address and both remembered halves are
 * compared inline before the two 8-byte stores.
 */
static void gen_stqcx_(DisasContext *ctx)
{
    int rs = rS(ctx->opcode);
    TCGv EA, hi, lo;

    /* ISA: odd RS is an invalid form. */
    if (unlikely(rs & 1)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }

    gen_set_access_type(ctx, ACCESS_RES);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);

    /* Note that the low part is always in RS+1, even in LE mode.  */
    lo = cpu_gpr[rs + 1];
    hi = cpu_gpr[rs];

    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
        if (HAVE_CMPXCHG128) {
            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
            if (ctx->le_mode) {
                gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
                                             EA, lo, hi, oi);
            } else {
                gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
                                             EA, lo, hi, oi);
            }
            tcg_temp_free_i32(oi);
        } else {
            /* Restart with exclusive lock.  */
            gen_helper_exit_atomic(cpu_env);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        tcg_temp_free(EA);
    } else {
        TCGLabel *lab_fail = gen_new_label();
        TCGLabel *lab_over = gen_new_label();
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* Address mismatch with the reservation fails the store. */
        tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
        tcg_temp_free(EA);

        /* Compare both remembered halves against current memory. */
        gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
                                     ? offsetof(CPUPPCState, reserve_val2)
                                     : offsetof(CPUPPCState, reserve_val)));
        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);

        tcg_gen_addi_i64(t0, cpu_reserve, 8);
        gen_qemu_ld64_i64(ctx, t0, t0);
        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
                                     ? offsetof(CPUPPCState, reserve_val)
                                     : offsetof(CPUPPCState, reserve_val2)));
        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);

        /* Success */
        gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
        tcg_gen_addi_i64(t0, cpu_reserve, 8);
        gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);

        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
        tcg_gen_br(lab_over);

        gen_set_label(lab_fail);
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);

        gen_set_label(lab_over);
        /* The reservation is always consumed, pass or fail. */
        tcg_gen_movi_tl(cpu_reserve, -1);
        tcg_temp_free_i64(t0);
        tcg_temp_free_i64(t1);
    }
}
4044 #endif /* defined(TARGET_PPC64) */
4045 
/* sync - memory barrier; L=1 (lwsync) allows a weaker barrier */
static void gen_sync(DisasContext *ctx)
{
    TCGBar bar = TCG_MO_ALL;
    uint32_t l = (ctx->opcode >> 21) & 3;

    /* lwsync does not order store-then-load. */
    if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) {
        bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST;
    }

    /*
     * We may need to check for a pending TLB flush.
     *
     * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
     *
     * Additionally, this can only happen in kernel mode however so
     * check MSR_PR as well.
     */
    if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
        gen_check_tlb_flush(ctx, true);
    }

    tcg_gen_mb(bar | TCG_BAR_SC);
}
4070 
/*
 * wait - wait for interrupt.  The encoding (and the meaning of the WC
 * and PL fields) differs between ISA v2.03-2.07, v3.0 and v3.1; only
 * WC=0 actually halts the CPU here, the other wait classes are
 * implemented as architecturally-permitted no-ops (see the comment at
 * the end of the function).
 */
static void gen_wait(DisasContext *ctx)
{
    uint32_t wc;

    if (ctx->insns_flags & PPC_WAIT) {
        /* v2.03-v2.07 define an older incompatible 'wait' encoding. */

        if (ctx->insns_flags2 & PPC2_PM_ISA206) {
            /* v2.06 introduced the WC field. WC > 0 may be treated as no-op. */
            wc = WC(ctx->opcode);
        } else {
            wc = 0;
        }

    } else if (ctx->insns_flags2 & PPC2_ISA300) {
        /* v3.0 defines a new 'wait' encoding. */
        wc = WC(ctx->opcode);
        if (ctx->insns_flags2 & PPC2_ISA310) {
            uint32_t pl = PL(ctx->opcode);

            /* WC 1,2 may be treated as no-op. WC 3 is reserved. */
            if (wc == 3) {
                gen_invalid(ctx);
                return;
            }

            /* PL 1-3 are reserved. If WC=2 then the insn is treated as noop. */
            if (pl > 0 && wc != 2) {
                gen_invalid(ctx);
                return;
            }

        } else { /* ISA300 */
            /* WC 1-3 are reserved */
            if (wc > 0) {
                gen_invalid(ctx);
                return;
            }
        }

    } else {
        warn_report("wait instruction decoded with wrong ISA flags.");
        gen_invalid(ctx);
        return;
    }

    /*
     * wait without WC field or with WC=0 waits for an exception / interrupt
     * to occur.
     */
    if (wc == 0) {
        /* Set cs->halted directly; 'halted' lives in CPUState, which
         * precedes 'env' inside PowerPCCPU, hence the negative offset. */
        TCGv_i32 t0 = tcg_const_i32(1);
        tcg_gen_st_i32(t0, cpu_env,
                       -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
        tcg_temp_free_i32(t0);
        /* Stop translation, as the CPU is supposed to sleep from now */
        gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
    }

    /*
     * Other wait types must not just wait until an exception occurs because
     * ignoring their other wake-up conditions could cause a hang.
     *
     * For v2.06 and 2.07, wc=1,2,3 are architected but may be implemented as
     * no-ops.
     *
     * wc=1 and wc=3 explicitly allow the instruction to be treated as a no-op.
     *
     * wc=2 waits for an implementation-specific condition, such could be
     * always true, so it can be implemented as a no-op.
     *
     * For v3.1, wc=1,2 are architected but may be implemented as no-ops.
     *
     * wc=1 (waitrsv) waits for an exception or a reservation to be lost.
     * Reservation-loss may have implementation-specific conditions, so it
     * can be implemented as a no-op.
     *
     * wc=2 waits for an exception or an amount of time to pass. This
     * amount is implementation-specific so it can be implemented as a
     * no-op.
     *
     * ISA v3.1 allows for execution to resume "in the rare case of
     * an implementation-dependent event", so in any case software must
     * not depend on the architected resumption condition to become
     * true, so no-op implementations should be architecturally correct
     * (if suboptimal).
     */
}
4160 
4161 #if defined(TARGET_PPC64)
/* doze - enter the DOZE power-managed state (hypervisor privileged) */
static void gen_doze(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_const_i32(PPC_PM_DOZE);
    gen_helper_pminsn(cpu_env, t);
    tcg_temp_free_i32(t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4177 
/* nap - enter the NAP power-managed state (hypervisor privileged) */
static void gen_nap(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_const_i32(PPC_PM_NAP);
    gen_helper_pminsn(cpu_env, t);
    tcg_temp_free_i32(t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4193 
/* stop - enter the STOP power-managed state (hypervisor privileged) */
static void gen_stop(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_const_i32(PPC_PM_STOP);
    gen_helper_pminsn(cpu_env, t);
    tcg_temp_free_i32(t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4209 
/* sleep - enter the SLEEP power-managed state (hypervisor privileged) */
static void gen_sleep(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_const_i32(PPC_PM_SLEEP);
    gen_helper_pminsn(cpu_env, t);
    tcg_temp_free_i32(t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4225 
/* rvwinkle - enter the RVWINKLE power-managed state (hypervisor privileged) */
static void gen_rvwinkle(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv_i32 t;

    CHK_HV(ctx);
    t = tcg_const_i32(PPC_PM_RVWINKLE);
    gen_helper_pminsn(cpu_env, t);
    tcg_temp_free_i32(t);
    /* Stop translation, as the CPU is supposed to sleep from now */
    gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next);
#endif /* defined(CONFIG_USER_ONLY) */
}
4241 #endif /* #if defined(TARGET_PPC64) */
4242 
/* Update the Come-From Address Register (if the CPU has one). */
static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
{
#if defined(TARGET_PPC64)
    if (ctx->has_cfar) {
        tcg_gen_movi_tl(cpu_cfar, nip);
    }
#endif
}
4251 
#if defined(TARGET_PPC64)
/*
 * Credit the instructions of the current translation block to the PMU
 * instruction counters.  Called just before leaving the TB.
 */
static void pmu_count_insns(DisasContext *ctx)
{
    /*
     * Do not bother calling the helper if the PMU isn't counting
     * instructions.
     */
    if (!ctx->pmu_insn_cnt) {
        return;
    }

 #if !defined(CONFIG_USER_ONLY)
    /*
     * The PMU insns_inc() helper stops the internal PMU timer if a
     * counter overflows happens. In that case, if the guest is
     * running with icount and we do not handle it beforehand,
     * the helper can trigger a 'bad icount read'.
     */
    gen_icount_io_start(ctx);

    gen_helper_insns_inc(cpu_env, tcg_constant_i32(ctx->base.num_insns));
#else
    /*
     * User mode can read (but not write) PMC5 and start/stop
     * the PMU via MMCR0_FC. In this case just increment
     * PMC5 with base.num_insns.
     */
    TCGv t0 = tcg_temp_new();

    gen_load_spr(t0, SPR_POWER_PMC5);
    tcg_gen_addi_tl(t0, t0, ctx->base.num_insns);
    gen_store_spr(SPR_POWER_PMC5, t0);

    tcg_temp_free(t0);
#endif /* #if !defined(CONFIG_USER_ONLY) */
}
#else
/* 32-bit targets have no PMU instruction counting. */
static void pmu_count_insns(DisasContext *ctx)
{
    return;
}
#endif /* #if defined(TARGET_PPC64) */
4294 
/* Whether a direct (chained) TB jump to 'dest' is permitted. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
4299 
4300 static void gen_lookup_and_goto_ptr(DisasContext *ctx)
4301 {
4302     if (unlikely(ctx->singlestep_enabled)) {
4303         gen_debug_exception(ctx);
4304     } else {
4305         /*
4306          * tcg_gen_lookup_and_goto_ptr will exit the TB if
4307          * CF_NO_GOTO_PTR is set. Count insns now.
4308          */
4309         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
4310             pmu_count_insns(ctx);
4311         }
4312 
4313         tcg_gen_lookup_and_goto_ptr();
4314     }
4315 }
4316 
4317 /***                                Branch                                 ***/
/*
 * Jump to 'dest': use a direct TB link when permitted, otherwise fall
 * back to an indirect TB-pointer lookup.  PMU instruction counting is
 * done before the TB is left.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (NARROW_MODE(ctx)) {
        dest = (uint32_t) dest;
    }
    if (use_goto_tb(ctx, dest)) {
        pmu_count_insns(ctx);
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_tl(cpu_nip, dest & ~3);
        gen_lookup_and_goto_ptr(ctx);
    }
}
4333 
/* Set the link register to 'nip' (truncated to 32 bits in narrow mode). */
static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
{
    if (NARROW_MODE(ctx)) {
        nip = (uint32_t)nip;
    }
    tcg_gen_movi_tl(cpu_lr, nip);
}
4341 
/* b ba bl bla - unconditional branch, relative or absolute, w/ or w/o link */
static void gen_b(DisasContext *ctx)
{
    target_ulong li, target;

    /* sign extend LI */
    li = LI(ctx->opcode);
    li = (li ^ 0x02000000) - 0x02000000;
    /* AA=0: displacement relative to the current instruction address. */
    if (likely(AA(ctx->opcode) == 0)) {
        target = ctx->cia + li;
    } else {
        target = li;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    gen_update_cfar(ctx, ctx->cia);
    gen_goto_tb(ctx, 0, target);
    ctx->base.is_jmp = DISAS_NORETURN;
}
4362 
/* Branch target kinds for gen_bcond(). */
#define BCOND_IM  0   /* immediate displacement (bc)      */
#define BCOND_LR  1   /* link register (bclr)             */
#define BCOND_CTR 2   /* count register (bcctr)           */
#define BCOND_TAR 3   /* target address register (bctar)  */
4367 
/*
 * Common code for bc/bclr/bcctr/bctar.  BO bit 0x4 disables the CTR
 * decrement-and-test, bit 0x2 selects CTR!=0 vs CTR==0, bit 0x10
 * disables the CR test and bit 0x8 selects the tested CR bit sense.
 * Register-indirect targets are copied up front because LK may
 * overwrite LR before the branch is resolved.
 */
static void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    TCGLabel *l1;
    TCGv target;

    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        /* Local temp: it must survive the branch to l1 below. */
        target = tcg_temp_local_new();
        if (type == BCOND_CTR) {
            tcg_gen_mov_tl(target, cpu_ctr);
        } else if (type == BCOND_TAR) {
            gen_load_spr(target, SPR_TAR);
        } else {
            tcg_gen_mov_tl(target, cpu_lr);
        }
    } else {
        target = NULL;
    }
    if (LK(ctx->opcode)) {
        gen_setlr(ctx, ctx->base.pc_next);
    }
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();

        if (type == BCOND_CTR) {
            /*
             * All ISAs up to v3 describe this form of bcctr as invalid but
             * some processors, ie. 64-bit server processors compliant with
             * arch 2.x, do implement a "test and decrement" logic instead,
             * as described in their respective UMs. This logic involves CTR
             * to act as both the branch target and a counter, which makes
             * it basically useless and thus never used in real code.
             *
             * This form was hence chosen to trigger extra micro-architectural
             * side-effect on real HW needed for the Spectre v2 workaround.
             * It is up to guests that implement such workaround, ie. linux, to
             * use this form in a way it just triggers the side-effect without
             * doing anything else harmful.
             */
            if (unlikely(!is_book3s_arch2x(ctx))) {
                gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
                tcg_temp_free(temp);
                tcg_temp_free(target);
                return;
            }

            /* Test first, then decrement ("test and decrement" order). */
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        } else {
            /* Normal order: decrement CTR, then test it. */
            tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
            if (NARROW_MODE(ctx)) {
                tcg_gen_ext32u_tl(temp, cpu_ctr);
            } else {
                tcg_gen_mov_tl(temp, cpu_ctr);
            }
            if (bo & 0x2) {
                tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
            } else {
                tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
            }
        }
        tcg_temp_free(temp);
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            /* Branch taken when the CR bit is set. */
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
        tcg_temp_free_i32(temp);
    }
    gen_update_cfar(ctx, ctx->cia);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->cia + li);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
    } else {
        /* Register-indirect target: low two address bits are ignored. */
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        gen_lookup_and_goto_ptr(ctx);
        tcg_temp_free(target);
    }
    if ((bo & 0x14) != 0x14) {
        /* fallthrough case */
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->base.pc_next);
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
4481 
/* bc: branch conditional with immediate displacement */
static void gen_bc(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_IM);
}
4486 
/* bcctr: branch conditional to the CTR register */
static void gen_bcctr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_CTR);
}
4491 
/* bclr: branch conditional to the LR register */
static void gen_bclr(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_LR);
}
4496 
/* bctar: branch conditional to the TAR register */
static void gen_bctar(DisasContext *ctx)
{
    gen_bcond(ctx, BCOND_TAR);
}
4501 
4502 /***                      Condition register logical                       ***/
/*
 * Emit a CR-logical instruction (crand, cror, ...):
 * CR bit crbD = CR bit crbA <tcg_op> CR bit crbB, leaving the other
 * bits of crbD's CR field untouched.  Each 4-bit CR field lives in
 * its own cpu_crf[] register, so each source field is first shifted
 * so that its bit lines up with the destination bit position (sh < 0
 * means a left shift), then the op is applied, the result is masked
 * down to the single destination bit and merged back into the
 * destination field.
 */
#define GEN_CRLOGIC(name, tcg_op, opc)                                        \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    uint8_t bitmask;                                                          \
    int sh;                                                                   \
    TCGv_i32 t0, t1;                                                          \
    sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03);             \
    t0 = tcg_temp_new_i32();                                                  \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]);                 \
    t1 = tcg_temp_new_i32();                                                  \
    sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03);             \
    if (sh > 0)                                                               \
        tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh);            \
    else if (sh < 0)                                                          \
        tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh);           \
    else                                                                      \
        tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]);                 \
    tcg_op(t0, t0, t1);                                                       \
    bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03);                             \
    tcg_gen_andi_i32(t0, t0, bitmask);                                        \
    tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask);          \
    tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1);                  \
    tcg_temp_free_i32(t0);                                                    \
    tcg_temp_free_i32(t1);                                                    \
}
4533 
/* crand: crbD = crbA & crbB */
GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
/* crandc: crbD = crbA & ~crbB */
GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
/* creqv: crbD = ~(crbA ^ crbB) */
GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
/* crnand: crbD = ~(crbA & crbB) */
GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
/* crnor: crbD = ~(crbA | crbB) */
GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
/* cror: crbD = crbA | crbB */
GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
/* crorc: crbD = crbA | ~crbB */
GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
/* crxor: crbD = crbA ^ crbB */
GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
4550 
/* mcrf: copy CR field crfS into CR field crfD */
static void gen_mcrf(DisasContext *ctx)
{
    tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
}
4556 
4557 /***                           System linkage                              ***/
4558 
/* rfi (supervisor only) */
static void gen_rfi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /*
     * This instruction doesn't exist anymore on 64-bit server
     * processors compliant with arch 2.x
     */
    if (is_book3s_arch2x(ctx)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        return;
    }
    /* Restore CPU state */
    CHK_SV(ctx);
    gen_icount_io_start(ctx);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfi(cpu_env);
    /* Machine state (may have) changed: end the TB */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4581 
4582 #if defined(TARGET_PPC64)
/* rfid (supervisor only): 64-bit return from interrupt */
static void gen_rfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_SV(ctx);
    gen_icount_io_start(ctx);
    gen_update_cfar(ctx, ctx->cia);
    gen_helper_rfid(cpu_env);
    /* Machine state (may have) changed: end the TB */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4596 
4597 #if !defined(CONFIG_USER_ONLY)
4598 static void gen_rfscv(DisasContext *ctx)
4599 {
4600 #if defined(CONFIG_USER_ONLY)
4601     GEN_PRIV(ctx);
4602 #else
4603     /* Restore CPU state */
4604     CHK_SV(ctx);
4605     gen_icount_io_start(ctx);
4606     gen_update_cfar(ctx, ctx->cia);
4607     gen_helper_rfscv(cpu_env);
4608     ctx->base.is_jmp = DISAS_EXIT;
4609 #endif
4610 }
4611 #endif
4612 
/* hrfid (hypervisor only): return from hypervisor interrupt */
static void gen_hrfid(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Restore CPU state */
    CHK_HV(ctx);
    gen_helper_hrfid(cpu_env);
    /* Machine state (may have) changed: end the TB */
    ctx->base.is_jmp = DISAS_EXIT;
#endif
}
4624 #endif
4625 
/* sc */
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation routes sc to a dedicated syscall exception code */
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
#else
#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
#define POWERPC_SYSCALL_VECTORED POWERPC_EXCP_SYSCALL_VECTORED
#endif
4633 static void gen_sc(DisasContext *ctx)
4634 {
4635     uint32_t lev;
4636 
4637     lev = (ctx->opcode >> 5) & 0x7F;
4638     gen_exception_err(ctx, POWERPC_SYSCALL, lev);
4639 }
4640 
4641 #if defined(TARGET_PPC64)
4642 #if !defined(CONFIG_USER_ONLY)
/* scv: system call vectored (privileged-mode path) */
static void gen_scv(DisasContext *ctx)
{
    /* LEV field of the instruction, passed through to the helper */
    uint32_t lev = (ctx->opcode >> 5) & 0x7F;

    /* Set the PC back to the faulting instruction. */
    gen_update_nip(ctx, ctx->cia);
    gen_helper_scv(cpu_env, tcg_constant_i32(lev));

    ctx->base.is_jmp = DISAS_NORETURN;
}
4653 #endif
4654 #endif
4655 
4656 /***                                Trap                                   ***/
4657 
4658 /* Check for unconditional traps (always or never) */
4659 static bool check_unconditional_trap(DisasContext *ctx)
4660 {
4661     /* Trap never */
4662     if (TO(ctx->opcode) == 0) {
4663         return true;
4664     }
4665     /* Trap always */
4666     if (TO(ctx->opcode) == 31) {
4667         gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
4668         return true;
4669     }
4670     return false;
4671 }
4672 
4673 /* tw */
4674 static void gen_tw(DisasContext *ctx)
4675 {
4676     TCGv_i32 t0;
4677 
4678     if (check_unconditional_trap(ctx)) {
4679         return;
4680     }
4681     t0 = tcg_const_i32(TO(ctx->opcode));
4682     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4683                   t0);
4684     tcg_temp_free_i32(t0);
4685 }
4686 
4687 /* twi */
4688 static void gen_twi(DisasContext *ctx)
4689 {
4690     TCGv t0;
4691     TCGv_i32 t1;
4692 
4693     if (check_unconditional_trap(ctx)) {
4694         return;
4695     }
4696     t0 = tcg_const_tl(SIMM(ctx->opcode));
4697     t1 = tcg_const_i32(TO(ctx->opcode));
4698     gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4699     tcg_temp_free(t0);
4700     tcg_temp_free_i32(t1);
4701 }
4702 
4703 #if defined(TARGET_PPC64)
4704 /* td */
4705 static void gen_td(DisasContext *ctx)
4706 {
4707     TCGv_i32 t0;
4708 
4709     if (check_unconditional_trap(ctx)) {
4710         return;
4711     }
4712     t0 = tcg_const_i32(TO(ctx->opcode));
4713     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
4714                   t0);
4715     tcg_temp_free_i32(t0);
4716 }
4717 
4718 /* tdi */
4719 static void gen_tdi(DisasContext *ctx)
4720 {
4721     TCGv t0;
4722     TCGv_i32 t1;
4723 
4724     if (check_unconditional_trap(ctx)) {
4725         return;
4726     }
4727     t0 = tcg_const_tl(SIMM(ctx->opcode));
4728     t1 = tcg_const_i32(TO(ctx->opcode));
4729     gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
4730     tcg_temp_free(t0);
4731     tcg_temp_free_i32(t1);
4732 }
4733 #endif
4734 
4735 /***                          Processor control                            ***/
4736 
/* mcrxr */
/*
 * Move XER[SO, OV, CA] into CR field crfD (bits 3/2/1; bit 0 cleared)
 * and clear those XER bits.  Note dst aliases cpu_crf[crfD], so it
 * doubles as a scratch register for the CA term.
 */
static void gen_mcrxr(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    tcg_gen_trunc_tl_i32(t0, cpu_so);
    tcg_gen_trunc_tl_i32(t1, cpu_ov);
    tcg_gen_trunc_tl_i32(dst, cpu_ca);
    tcg_gen_shli_i32(t0, t0, 3);
    tcg_gen_shli_i32(t1, t1, 2);
    tcg_gen_shli_i32(dst, dst, 1);
    /* dst = (SO << 3) | (OV << 2) | (CA << 1) */
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_or_i32(dst, dst, t1);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* mcrxr clears the bits it moved out of the XER */
    tcg_gen_movi_tl(cpu_so, 0);
    tcg_gen_movi_tl(cpu_ov, 0);
    tcg_gen_movi_tl(cpu_ca, 0);
}
4759 
4760 #ifdef TARGET_PPC64
/* mcrxrx */
/*
 * Move XER[OV, OV32, CA, CA32] into CR field crfD as bits 3..0.
 * Unlike mcrxr, the XER bits are left unchanged.
 */
static void gen_mcrxrx(DisasContext *ctx)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];

    /* copy OV and OV32 */
    tcg_gen_shli_tl(t0, cpu_ov, 1);
    tcg_gen_or_tl(t0, t0, cpu_ov32);
    tcg_gen_shli_tl(t0, t0, 2);
    /* copy CA and CA32 */
    tcg_gen_shli_tl(t1, cpu_ca, 1);
    tcg_gen_or_tl(t1, t1, cpu_ca32);
    /* t0 = (OV << 3) | (OV32 << 2) | (CA << 1) | CA32 */
    tcg_gen_or_tl(t0, t0, t1);
    tcg_gen_trunc_tl_i32(dst, t0);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
4780 #endif
4781 
4782 /* mfcr mfocrf */
4783 static void gen_mfcr(DisasContext *ctx)
4784 {
4785     uint32_t crm, crn;
4786 
4787     if (likely(ctx->opcode & 0x00100000)) {
4788         crm = CRM(ctx->opcode);
4789         if (likely(crm && ((crm & (crm - 1)) == 0))) {
4790             crn = ctz32(crm);
4791             tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
4792             tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
4793                             cpu_gpr[rD(ctx->opcode)], crn * 4);
4794         }
4795     } else {
4796         TCGv_i32 t0 = tcg_temp_new_i32();
4797         tcg_gen_mov_i32(t0, cpu_crf[0]);
4798         tcg_gen_shli_i32(t0, t0, 4);
4799         tcg_gen_or_i32(t0, t0, cpu_crf[1]);
4800         tcg_gen_shli_i32(t0, t0, 4);
4801         tcg_gen_or_i32(t0, t0, cpu_crf[2]);
4802         tcg_gen_shli_i32(t0, t0, 4);
4803         tcg_gen_or_i32(t0, t0, cpu_crf[3]);
4804         tcg_gen_shli_i32(t0, t0, 4);
4805         tcg_gen_or_i32(t0, t0, cpu_crf[4]);
4806         tcg_gen_shli_i32(t0, t0, 4);
4807         tcg_gen_or_i32(t0, t0, cpu_crf[5]);
4808         tcg_gen_shli_i32(t0, t0, 4);
4809         tcg_gen_or_i32(t0, t0, cpu_crf[6]);
4810         tcg_gen_shli_i32(t0, t0, 4);
4811         tcg_gen_or_i32(t0, t0, cpu_crf[7]);
4812         tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
4813         tcg_temp_free_i32(t0);
4814     }
4815 }
4816 
/* mfmsr: copy the MSR into rD (supervisor only) */
static void gen_mfmsr(DisasContext *ctx)
{
    CHK_SV(ctx);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
}
4823 
/* mfspr */
/*
 * Common backend for mfspr and mftb: pick the per-SPR read callback
 * for the current privilege level and either invoke it, raise a
 * privilege exception, or emulate the no-op / invalid-SPR behaviour.
 */
static inline void gen_op_mfspr(DisasContext *ctx)
{
    void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    read_cb = ctx->spr_cb[sprn].uea_read;
#else
    /* Select the callback matching problem/hypervisor/supervisor state */
    if (ctx->pr) {
        read_cb = ctx->spr_cb[sprn].uea_read;
    } else if (ctx->hv) {
        read_cb = ctx->spr_cb[sprn].hea_read;
    } else {
        read_cb = ctx->spr_cb[sprn].oea_read;
    }
#endif
    if (likely(read_cb != NULL)) {
        if (likely(read_cb != SPR_NOACCESS)) {
            (*read_cb)(ctx, rD(ctx->opcode), sprn);
        } else {
            /* Privilege exception */
            /*
             * This is a hack to avoid warnings when running Linux:
             * this OS breaks the PowerPC virtualisation model,
             * allowing userland application to read the PVR
             */
            if (sprn != SPR_PVR) {
                qemu_log_mask(LOG_GUEST_ERROR, "Trying to read privileged spr "
                              "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                              ctx->cia);
            }
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }
        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to read invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);

        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
4885 
/* mfspr proper: shares the SPR read path with mftb */
static void gen_mfspr(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4890 
/* mftb: time base reads go through the same SPR dispatch as mfspr */
static void gen_mftb(DisasContext *ctx)
{
    gen_op_mfspr(ctx);
}
4896 
4897 /* mtcrf mtocrf*/
4898 static void gen_mtcrf(DisasContext *ctx)
4899 {
4900     uint32_t crm, crn;
4901 
4902     crm = CRM(ctx->opcode);
4903     if (likely((ctx->opcode & 0x00100000))) {
4904         if (crm && ((crm & (crm - 1)) == 0)) {
4905             TCGv_i32 temp = tcg_temp_new_i32();
4906             crn = ctz32(crm);
4907             tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4908             tcg_gen_shri_i32(temp, temp, crn * 4);
4909             tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
4910             tcg_temp_free_i32(temp);
4911         }
4912     } else {
4913         TCGv_i32 temp = tcg_temp_new_i32();
4914         tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
4915         for (crn = 0 ; crn < 8 ; crn++) {
4916             if (crm & (1 << crn)) {
4917                     tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
4918                     tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
4919             }
4920         }
4921         tcg_temp_free_i32(temp);
4922     }
4923 }
4924 
4925 /* mtmsr */
4926 #if defined(TARGET_PPC64)
/* mtmsrd: 64-bit move to MSR (book3s arch 2.x, supervisor only) */
static void gen_mtmsrd(DisasContext *ctx)
{
    if (unlikely(!is_book3s_arch2x(ctx))) {
        gen_invalid(ctx);
        return;
    }

    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    target_ulong mask;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    gen_icount_io_start(ctx);

    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask = (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsrd does not alter HV, S, ME, or LE */
        mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) |
                 (1ULL << MSR_HV));
        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;

    tcg_temp_free(t0);
    tcg_temp_free(t1);
#endif /* !defined(CONFIG_USER_ONLY) */
}
4973 #endif /* defined(TARGET_PPC64) */
4974 
/* mtmsr: 32-bit move to MSR (supervisor only) */
static void gen_mtmsr(DisasContext *ctx)
{
    CHK_SV(ctx);

#if !defined(CONFIG_USER_ONLY)
    TCGv t0, t1;
    /* Only the low 32 MSR bits are writable via mtmsr */
    target_ulong mask = 0xFFFFFFFF;

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    gen_icount_io_start(ctx);
    if (ctx->opcode & 0x00010000) {
        /* L=1 form only updates EE and RI */
        mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE);
    } else {
        /* mtmsr does not alter S, ME, or LE */
        mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S));

        /*
         * XXX: we need to update nip before the store if we enter
         *      power saving mode, we will exit the loop directly from
         *      ppc_store_msr
         */
        gen_update_nip(ctx, ctx->base.pc_next);
    }

    /* new MSR = (rS & mask) | (old MSR & ~mask) */
    tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_andi_tl(t1, cpu_msr, ~mask);
    tcg_gen_or_tl(t0, t0, t1);

    gen_helper_store_msr(cpu_env, t0);

    /* Must stop the translation as machine state (may have) changed */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;

    tcg_temp_free(t0);
    tcg_temp_free(t1);
#endif
}
5015 
/* mtspr */
/*
 * Pick the per-SPR write callback for the current privilege level and
 * either invoke it, raise a privilege exception, or emulate the
 * no-op / invalid-SPR behaviour.  Mirrors gen_op_mfspr.
 */
static void gen_mtspr(DisasContext *ctx)
{
    void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
    uint32_t sprn = SPR(ctx->opcode);

#if defined(CONFIG_USER_ONLY)
    write_cb = ctx->spr_cb[sprn].uea_write;
#else
    /* Select the callback matching problem/hypervisor/supervisor state */
    if (ctx->pr) {
        write_cb = ctx->spr_cb[sprn].uea_write;
    } else if (ctx->hv) {
        write_cb = ctx->spr_cb[sprn].hea_write;
    } else {
        write_cb = ctx->spr_cb[sprn].oea_write;
    }
#endif
    if (likely(write_cb != NULL)) {
        if (likely(write_cb != SPR_NOACCESS)) {
            (*write_cb)(ctx, sprn, rS(ctx->opcode));
        } else {
            /* Privilege exception */
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to write privileged spr "
                          "%d (0x%03x) at " TARGET_FMT_lx "\n", sprn, sprn,
                          ctx->cia);
            gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
        }
    } else {
        /* ISA 2.07 defines these as no-ops */
        if ((ctx->insns_flags2 & PPC2_ISA207S) &&
            (sprn >= 808 && sprn <= 811)) {
            /* This is a nop */
            return;
        }

        /* Not defined */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Trying to write invalid spr %d (0x%03x) at "
                      TARGET_FMT_lx "\n", sprn, sprn, ctx->cia);


        /*
         * The behaviour depends on MSR:PR and SPR# bit 0x10, it can
         * generate a priv, a hv emu or a no-op
         */
        if (sprn & 0x10) {
            if (ctx->pr) {
                gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        } else {
            if (ctx->pr || sprn == 0) {
                gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
            }
        }
    }
}
5072 
5073 #if defined(TARGET_PPC64)
/* setb */
/*
 * rD = -1 if bit 3 (LT) of CR field crfS is set, else 1 if bit 2 (GT)
 * is set, else 0.
 */
static void gen_setb(DisasContext *ctx)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t8 = tcg_constant_i32(8);
    TCGv_i32 tm1 = tcg_constant_i32(-1);
    int crf = crfS(ctx->opcode);

    /* t0 = (field >= 4), i.e. 1 when the GT bit (or higher) is set */
    tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
    /* when the LT bit is set (field >= 8), override the result with -1 */
    tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
    tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);

    tcg_temp_free_i32(t0);
}
5088 #endif
5089 
5090 /***                         Cache management                              ***/
5091 
5092 /* dcbf */
5093 static void gen_dcbf(DisasContext *ctx)
5094 {
5095     /* XXX: specification says this is treated as a load by the MMU */
5096     TCGv t0;
5097     gen_set_access_type(ctx, ACCESS_CACHE);
5098     t0 = tcg_temp_new();
5099     gen_addr_reg_index(ctx, t0);
5100     gen_qemu_ld8u(ctx, t0, t0);
5101     tcg_temp_free(t0);
5102 }
5103 
/* dcbfep (external PID dcbf) */
static void gen_dcbfep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    /* External-PID form is supervisor privileged */
    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* One-byte load through the external-PID MMU index */
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
    tcg_temp_free(t0);
}
5116 
/* dcbi (Supervisor only) */
static void gen_dcbi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    EA = tcg_temp_new();
    gen_set_access_type(ctx, ACCESS_CACHE);
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* XXX: specification says this should be treated as a store by the MMU */
    /* Load-then-store of one byte triggers both read and write checks */
    gen_qemu_ld8u(ctx, val, EA);
    gen_qemu_st8(ctx, val, EA);
    tcg_temp_free(val);
    tcg_temp_free(EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5137 
5138 /* dcdst */
5139 static void gen_dcbst(DisasContext *ctx)
5140 {
5141     /* XXX: specification say this is treated as a load by the MMU */
5142     TCGv t0;
5143     gen_set_access_type(ctx, ACCESS_CACHE);
5144     t0 = tcg_temp_new();
5145     gen_addr_reg_index(ctx, t0);
5146     gen_qemu_ld8u(ctx, t0, t0);
5147     tcg_temp_free(t0);
5148 }
5149 
/* dcbstep (dcbstep External PID version) */
static void gen_dcbstep(DisasContext *ctx)
{
    /* XXX: specification says this is treated as a load by the MMU */
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /* One-byte load through the external-PID MMU index */
    tcg_gen_qemu_ld_tl(t0, t0, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UB));
    tcg_temp_free(t0);
}
5161 
/* dcbt */
static void gen_dcbt(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5171 
/* dcbtep */
static void gen_dcbtep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5181 
/* dcbtst */
static void gen_dcbtst(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5191 
/* dcbtstep */
static void gen_dcbtstep(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5201 
/* dcbtls */
static void gen_dcbtls(DisasContext *ctx)
{
    /* Always fails locking the cache */
    TCGv t0 = tcg_temp_new();
    /* Report the failure by setting L1CSR0[CUL] (cache unable to lock) */
    gen_load_spr(t0, SPR_Exxx_L1CSR0);
    tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
    gen_store_spr(SPR_Exxx_L1CSR0, t0);
    tcg_temp_free(t0);
}
5212 
5213 /* dcbz */
5214 static void gen_dcbz(DisasContext *ctx)
5215 {
5216     TCGv tcgv_addr;
5217     TCGv_i32 tcgv_op;
5218 
5219     gen_set_access_type(ctx, ACCESS_CACHE);
5220     tcgv_addr = tcg_temp_new();
5221     tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
5222     gen_addr_reg_index(ctx, tcgv_addr);
5223     gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
5224     tcg_temp_free(tcgv_addr);
5225     tcg_temp_free_i32(tcgv_op);
5226 }
5227 
5228 /* dcbzep */
5229 static void gen_dcbzep(DisasContext *ctx)
5230 {
5231     TCGv tcgv_addr;
5232     TCGv_i32 tcgv_op;
5233 
5234     gen_set_access_type(ctx, ACCESS_CACHE);
5235     tcgv_addr = tcg_temp_new();
5236     tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
5237     gen_addr_reg_index(ctx, tcgv_addr);
5238     gen_helper_dcbzep(cpu_env, tcgv_addr, tcgv_op);
5239     tcg_temp_free(tcgv_addr);
5240     tcg_temp_free_i32(tcgv_op);
5241 }
5242 
5243 /* dst / dstt */
5244 static void gen_dst(DisasContext *ctx)
5245 {
5246     if (rA(ctx->opcode) == 0) {
5247         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5248     } else {
5249         /* interpreted as no-op */
5250     }
5251 }
5252 
5253 /* dstst /dststt */
5254 static void gen_dstst(DisasContext *ctx)
5255 {
5256     if (rA(ctx->opcode) == 0) {
5257         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
5258     } else {
5259         /* interpreted as no-op */
5260     }
5261 
5262 }
5263 
/* dss / dssall: stop data stream(s) — nothing to do without prefetch */
static void gen_dss(DisasContext *ctx)
{
    /* interpreted as no-op */
}
5269 
5270 /* icbi */
5271 static void gen_icbi(DisasContext *ctx)
5272 {
5273     TCGv t0;
5274     gen_set_access_type(ctx, ACCESS_CACHE);
5275     t0 = tcg_temp_new();
5276     gen_addr_reg_index(ctx, t0);
5277     gen_helper_icbi(cpu_env, t0);
5278     tcg_temp_free(t0);
5279 }
5280 
/* icbiep: external-PID variant of icbi */
static void gen_icbiep(DisasContext *ctx)
{
    TCGv t0;
    gen_set_access_type(ctx, ACCESS_CACHE);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_icbiep(cpu_env, t0);
    tcg_temp_free(t0);
}
5291 
5292 /* Optional: */
/* dcba */
static void gen_dcba(DisasContext *ctx)
{
    /*
     * interpreted as no-op
     * XXX: specification says this is treated as a store by the MMU
     *      but does not generate any exception
     */
}
5302 
5303 /***                    Segment register manipulation                      ***/
5304 /* Supervisor only: */
5305 
/* mfsr: read segment register SR into rD (supervisor only) */
static void gen_mfsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* Segment register number comes from the SR opcode field */
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5320 
/* mfsrin: read the segment register indexed by rB[28:31] into rD */
static void gen_mfsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top nibble of the effective address */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5336 
/* mtsr: write rS into segment register SR (supervisor only) */
static void gen_mtsr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5351 
/* mtsrin: write to the segment register indexed by rB[28:31] */
static void gen_mtsrin(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;
    CHK_SV(ctx);

    t0 = tcg_temp_new();
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    /*
     * NOTE(review): the source is fetched with rD(); rS and rD decode
     * the same opcode bits, so this should be equivalent — confirm.
     */
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5367 
5368 #if defined(TARGET_PPC64)
5369 /* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
5370 
/* mfsr: 64-bit "bridge" variant backed by the SLB */
static void gen_mfsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5385 
/* mfsrin: 64-bit "bridge" variant backed by the SLB */
static void gen_mfsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top nibble of the effective address */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5401 
/* mtsr: 64-bit "bridge" variant backed by the SLB */
static void gen_mtsr_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_const_tl(SR(ctx->opcode));
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5416 
/* mtsrin: 64-bit "bridge" variant backed by the SLB */
static void gen_mtsrin_64b(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    /* Segment register number is the top nibble of the effective address */
    tcg_gen_extract_tl(t0, cpu_gpr[rB(ctx->opcode)], 28, 4);
    gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
5432 
5433 #endif /* defined(TARGET_PPC64) */
5434 
5435 /***                      Lookaside buffer management                      ***/
5436 /* Optional & supervisor only: */
5437 
/* tlbia: invalidate all TLB entries (hypervisor privileged here) */
static void gen_tlbia(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);

    gen_helper_tlbia(cpu_env);
#endif  /* defined(CONFIG_USER_ONLY) */
}
5449 
/* tlbsync */
static void gen_tlbsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else

    if (ctx->gtse) {
        CHK_SV(ctx); /* If gtse is set then tlbsync is supervisor privileged */
    } else {
        CHK_HV(ctx); /* Else hypervisor privileged */
    }

    /* BookS does both ptesync and tlbsync make tlbsync a nop for server */
    if (ctx->insns_flags & PPC_BOOKE) {
        gen_check_tlb_flush(ctx, true);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5469 
5470 /***                              External control                         ***/
5471 /* Optional: */
5472 
5473 /* eciwx */
5474 static void gen_eciwx(DisasContext *ctx)
5475 {
5476     TCGv t0;
5477     /* Should check EAR[E] ! */
5478     gen_set_access_type(ctx, ACCESS_EXT);
5479     t0 = tcg_temp_new();
5480     gen_addr_reg_index(ctx, t0);
5481     tcg_gen_qemu_ld_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
5482                        DEF_MEMOP(MO_UL | MO_ALIGN));
5483     tcg_temp_free(t0);
5484 }
5485 
/* ecowx */
static void gen_ecowx(DisasContext *ctx)
{
    TCGv t0;
    /* Should check EAR[E] ! */
    gen_set_access_type(ctx, ACCESS_EXT);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    /*
     * NOTE(review): the store source is read via rD(); presumably rD()
     * and rS() decode the same opcode bits — confirm against the macros.
     */
    tcg_gen_qemu_st_tl(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx,
                       DEF_MEMOP(MO_UL | MO_ALIGN));
    tcg_temp_free(t0);
}
5498 
5499 /* 602 - 603 - G2 TLB management */
5500 
/* tlbld */
static void gen_tlbld_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* 6xx data TLB entry load; operand taken from rB. Supervisor only. */
    CHK_SV(ctx);
    gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5511 
/* tlbli */
static void gen_tlbli_6xx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* 6xx instruction TLB entry load; operand taken from rB. Supervisor only. */
    CHK_SV(ctx);
    gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
5522 
5523 /* BookE specific instructions */
5524 
/* XXX: not implemented on 440 ? */
static void gen_mfapidi(DisasContext *ctx)
{
    /* XXX: TODO — mfapidi is not modelled; raise an invalid-insn exception */
    gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
}
5531 
5532 /* XXX: not implemented on 440 ? */
5533 static void gen_tlbiva(DisasContext *ctx)
5534 {
5535 #if defined(CONFIG_USER_ONLY)
5536     GEN_PRIV(ctx);
5537 #else
5538     TCGv t0;
5539 
5540     CHK_SV(ctx);
5541     t0 = tcg_temp_new();
5542     gen_addr_reg_index(ctx, t0);
5543     gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
5544     tcg_temp_free(t0);
5545 #endif /* defined(CONFIG_USER_ONLY) */
5546 }
5547 
5548 /* All 405 MAC instructions are translated here */
5549 static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
5550                                         int ra, int rb, int rt, int Rc)
5551 {
5552     TCGv t0, t1;
5553 
5554     t0 = tcg_temp_local_new();
5555     t1 = tcg_temp_local_new();
5556 
5557     switch (opc3 & 0x0D) {
5558     case 0x05:
5559         /* macchw    - macchw.    - macchwo   - macchwo.   */
5560         /* macchws   - macchws.   - macchwso  - macchwso.  */
5561         /* nmacchw   - nmacchw.   - nmacchwo  - nmacchwo.  */
5562         /* nmacchws  - nmacchws.  - nmacchwso - nmacchwso. */
5563         /* mulchw - mulchw. */
5564         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5565         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5566         tcg_gen_ext16s_tl(t1, t1);
5567         break;
5568     case 0x04:
5569         /* macchwu   - macchwu.   - macchwuo  - macchwuo.  */
5570         /* macchwsu  - macchwsu.  - macchwsuo - macchwsuo. */
5571         /* mulchwu - mulchwu. */
5572         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5573         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5574         tcg_gen_ext16u_tl(t1, t1);
5575         break;
5576     case 0x01:
5577         /* machhw    - machhw.    - machhwo   - machhwo.   */
5578         /* machhws   - machhws.   - machhwso  - machhwso.  */
5579         /* nmachhw   - nmachhw.   - nmachhwo  - nmachhwo.  */
5580         /* nmachhws  - nmachhws.  - nmachhwso - nmachhwso. */
5581         /* mulhhw - mulhhw. */
5582         tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
5583         tcg_gen_ext16s_tl(t0, t0);
5584         tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
5585         tcg_gen_ext16s_tl(t1, t1);
5586         break;
5587     case 0x00:
5588         /* machhwu   - machhwu.   - machhwuo  - machhwuo.  */
5589         /* machhwsu  - machhwsu.  - machhwsuo - machhwsuo. */
5590         /* mulhhwu - mulhhwu. */
5591         tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
5592         tcg_gen_ext16u_tl(t0, t0);
5593         tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
5594         tcg_gen_ext16u_tl(t1, t1);
5595         break;
5596     case 0x0D:
5597         /* maclhw    - maclhw.    - maclhwo   - maclhwo.   */
5598         /* maclhws   - maclhws.   - maclhwso  - maclhwso.  */
5599         /* nmaclhw   - nmaclhw.   - nmaclhwo  - nmaclhwo.  */
5600         /* nmaclhws  - nmaclhws.  - nmaclhwso - nmaclhwso. */
5601         /* mullhw - mullhw. */
5602         tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
5603         tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
5604         break;
5605     case 0x0C:
5606         /* maclhwu   - maclhwu.   - maclhwuo  - maclhwuo.  */
5607         /* maclhwsu  - maclhwsu.  - maclhwsuo - maclhwsuo. */
5608         /* mullhwu - mullhwu. */
5609         tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
5610         tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
5611         break;
5612     }
5613     if (opc2 & 0x04) {
5614         /* (n)multiply-and-accumulate (0x0C / 0x0E) */
5615         tcg_gen_mul_tl(t1, t0, t1);
5616         if (opc2 & 0x02) {
5617             /* nmultiply-and-accumulate (0x0E) */
5618             tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
5619         } else {
5620             /* multiply-and-accumulate (0x0C) */
5621             tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
5622         }
5623 
5624         if (opc3 & 0x12) {
5625             /* Check overflow and/or saturate */
5626             TCGLabel *l1 = gen_new_label();
5627 
5628             if (opc3 & 0x10) {
5629                 /* Start with XER OV disabled, the most likely case */
5630                 tcg_gen_movi_tl(cpu_ov, 0);
5631             }
5632             if (opc3 & 0x01) {
5633                 /* Signed */
5634                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
5635                 tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
5636                 tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
5637                 tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
5638                 if (opc3 & 0x02) {
5639                     /* Saturate */
5640                     tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
5641                     tcg_gen_xori_tl(t0, t0, 0x7fffffff);
5642                 }
5643             } else {
5644                 /* Unsigned */
5645                 tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
5646                 if (opc3 & 0x02) {
5647                     /* Saturate */
5648                     tcg_gen_movi_tl(t0, UINT32_MAX);
5649                 }
5650             }
5651             if (opc3 & 0x10) {
5652                 /* Check overflow */
5653                 tcg_gen_movi_tl(cpu_ov, 1);
5654                 tcg_gen_movi_tl(cpu_so, 1);
5655             }
5656             gen_set_label(l1);
5657             tcg_gen_mov_tl(cpu_gpr[rt], t0);
5658         }
5659     } else {
5660         tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
5661     }
5662     tcg_temp_free(t0);
5663     tcg_temp_free(t1);
5664     if (unlikely(Rc) != 0) {
5665         /* Update Rc0 */
5666         gen_set_Rc0(ctx, cpu_gpr[rt]);
5667     }
5668 }
5669 
/*
 * GEN_MAC_HANDLER(name, opc2, opc3): emit the translator for one 405
 * MAC-family instruction by forwarding the decoded register fields to
 * gen_405_mulladd_insn with the given sub-opcodes.
 */
#define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
static void glue(gen_, name)(DisasContext *ctx)                               \
{                                                                             \
    gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode),   \
                         rD(ctx->opcode), Rc(ctx->opcode));                   \
}

/* macchw    - macchw.    */
GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
/* macchwo   - macchwo.   */
GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
/* macchws   - macchws.   */
GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
/* macchwso  - macchwso.  */
GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
/* macchwsu  - macchwsu.  */
GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
/* macchwsuo - macchwsuo. */
GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
/* macchwu   - macchwu.   */
GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
/* macchwuo  - macchwuo.  */
GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
/* machhw    - machhw.    */
GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
/* machhwo   - machhwo.   */
GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
/* machhws   - machhws.   */
GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
/* machhwso  - machhwso.  */
GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
/* machhwsu  - machhwsu.  */
GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
/* machhwsuo - machhwsuo. */
GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
/* machhwu   - machhwu.   */
GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
/* machhwuo  - machhwuo.  */
GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
/* maclhw    - maclhw.    */
GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
/* maclhwo   - maclhwo.   */
GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
/* maclhws   - maclhws.   */
GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
/* maclhwso  - maclhwso.  */
GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
/* maclhwu   - maclhwu.   */
GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
/* maclhwuo  - maclhwuo.  */
GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
/* maclhwsu  - maclhwsu.  */
GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
/* maclhwsuo - maclhwsuo. */
GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
/* nmacchw   - nmacchw.   */
GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
/* nmacchwo  - nmacchwo.  */
GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
/* nmacchws  - nmacchws.  */
GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
/* nmacchwso - nmacchwso. */
GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
/* nmachhw   - nmachhw.   */
GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
/* nmachhwo  - nmachhwo.  */
GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
/* nmachhws  - nmachhws.  */
GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
/* nmachhwso - nmachhwso. */
GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
/* nmaclhw   - nmaclhw.   */
GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
/* nmaclhwo  - nmaclhwo.  */
GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
/* nmaclhws  - nmaclhws.  */
GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
/* nmaclhwso - nmaclhwso. */
GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);

/* mulchw  - mulchw.  */
GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
/* mulchwu - mulchwu. */
GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
/* mulhhw  - mulhhw.  */
GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
/* mulhhwu - mulhhwu. */
GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
/* mullhw  - mullhw.  */
GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
/* mullhwu - mullhwu. */
GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
5762 
5763 /* mfdcr */
5764 static void gen_mfdcr(DisasContext *ctx)
5765 {
5766 #if defined(CONFIG_USER_ONLY)
5767     GEN_PRIV(ctx);
5768 #else
5769     TCGv dcrn;
5770 
5771     CHK_SV(ctx);
5772     dcrn = tcg_const_tl(SPR(ctx->opcode));
5773     gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
5774     tcg_temp_free(dcrn);
5775 #endif /* defined(CONFIG_USER_ONLY) */
5776 }
5777 
/* mtdcr */
static void gen_mtdcr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv dcrn;

    /* Write rS to the DCR selected by the SPR field. Supervisor only. */
    CHK_SV(ctx);
    dcrn = tcg_const_tl(SPR(ctx->opcode));
    gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
    tcg_temp_free(dcrn);
#endif /* defined(CONFIG_USER_ONLY) */
}
5792 
/* mfdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Indexed form: the DCR number is taken from GPR[rA] at runtime. */
    CHK_SV(ctx);
    gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
                        cpu_gpr[rA(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5806 
/* mtdcrx */
/* XXX: not implemented on 440 ? */
static void gen_mtdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* Indexed form: the DCR number is taken from GPR[rA] at runtime. */
    CHK_SV(ctx);
    gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
                         cpu_gpr[rS(ctx->opcode)]);
    /* Note: Rc update flag set leads to undefined state of Rc0 */
#endif /* defined(CONFIG_USER_ONLY) */
}
5820 
/* dccci */
static void gen_dccci(DisasContext *ctx)
{
    /* Data cache congruence-class invalidate: privilege-checked no-op,
     * since QEMU does not model the data cache. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5827 
/* dcread */
static void gen_dcread(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv EA, val;

    CHK_SV(ctx);
    gen_set_access_type(ctx, ACCESS_CACHE);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    val = tcg_temp_new();
    /* The load is performed only for its MMU side effects; the loaded
     * value is discarded and the effective address is returned in rD. */
    gen_qemu_ld32u(ctx, val, EA);
    tcg_temp_free(val);
    tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
    tcg_temp_free(EA);
#endif /* defined(CONFIG_USER_ONLY) */
}
5847 
/* icbt */
static void gen_icbt_40x(DisasContext *ctx)
{
    /*
     * Instruction cache block touch: interpreted as no-op.
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
5857 
/* iccci */
static void gen_iccci(DisasContext *ctx)
{
    /* Instruction cache invalidate: privilege-checked no-op. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5864 
/* icread */
static void gen_icread(DisasContext *ctx)
{
    /* Instruction cache read: privilege-checked no-op. */
    CHK_SV(ctx);
    /* interpreted as no-op */
}
5871 
/* rfci (supervisor only) */
static void gen_rfci_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_40x_rfci(cpu_env);
    /* The helper changed MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5884 
/* rfci: return from critical interrupt (supervisor only). */
static void gen_rfci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfci(cpu_env);
    /* The helper changed MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5896 
5897 /* BookE specific */
5898 
/* XXX: not implemented on 440 ? */
/* rfdi: return from debug interrupt (supervisor only). */
static void gen_rfdi(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfdi(cpu_env);
    /* The helper changed MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5911 
/* XXX: not implemented on 440 ? */
/* rfmci: return from machine-check interrupt (supervisor only). */
static void gen_rfmci(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* Restore CPU state */
    gen_helper_rfmci(cpu_env);
    /* The helper changed MSR/PC, so end the TB and re-enter the loop. */
    ctx->base.is_jmp = DISAS_EXIT;
#endif /* defined(CONFIG_USER_ONLY) */
}
5924 
5925 /* TLB management - PowerPC 405 implementation */
5926 
/* tlbre */
static void gen_tlbre_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* rB selects the word of the entry indexed by rA: 0 = hi, 1 = lo. */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                cpu_gpr[rA(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5949 
/* tlbsx - tlbsx. */
static void gen_tlbsx_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        /* CR0 = SO, plus EQ when an entry was found (result != -1;
         * NOTE(review): -1-as-not-found is inferred from this check). */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5972 
/* tlbwe */
static void gen_tlbwe_40x(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* rB selects the word of the entry indexed by rA: 0 = hi, 1 = lo. */
    switch (rB(ctx->opcode)) {
    case 0:
        gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    case 1:
        gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
                                cpu_gpr[rS(ctx->opcode)]);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
5996 
5997 /* TLB management - PowerPC 440 implementation */
5998 
/* tlbre */
static void gen_tlbre_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);

    /* rB selects which word (0..2) of the entry indexed by rA to read. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
            gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
                                 t0, cpu_gpr[rA(ctx->opcode)]);
            tcg_temp_free_i32(t0);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6024 
/* tlbsx - tlbsx. */
static void gen_tlbsx_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);
    gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
    tcg_temp_free(t0);
    if (Rc(ctx->opcode)) {
        TCGLabel *l1 = gen_new_label();
        /* CR0 = SO, plus EQ when an entry was found (result != -1;
         * NOTE(review): -1-as-not-found is inferred from this check). */
        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
        gen_set_label(l1);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6047 
/* tlbwe */
static void gen_tlbwe_440(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* rB selects which word (0..2) of the entry indexed by rA to write. */
    switch (rB(ctx->opcode)) {
    case 0:
    case 1:
    case 2:
        {
            TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
            gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
                                 cpu_gpr[rS(ctx->opcode)]);
            tcg_temp_free_i32(t0);
        }
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6072 
6073 /* TLB management - PowerPC BookE 2.06 implementation */
6074 
6075 /* tlbre */
6076 static void gen_tlbre_booke206(DisasContext *ctx)
6077 {
6078  #if defined(CONFIG_USER_ONLY)
6079     GEN_PRIV(ctx);
6080 #else
6081    CHK_SV(ctx);
6082     gen_helper_booke206_tlbre(cpu_env);
6083 #endif /* defined(CONFIG_USER_ONLY) */
6084 }
6085 
6086 /* tlbsx - tlbsx. */
6087 static void gen_tlbsx_booke206(DisasContext *ctx)
6088 {
6089 #if defined(CONFIG_USER_ONLY)
6090     GEN_PRIV(ctx);
6091 #else
6092     TCGv t0;
6093 
6094     CHK_SV(ctx);
6095     if (rA(ctx->opcode)) {
6096         t0 = tcg_temp_new();
6097         tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
6098     } else {
6099         t0 = tcg_const_tl(0);
6100     }
6101 
6102     tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
6103     gen_helper_booke206_tlbsx(cpu_env, t0);
6104     tcg_temp_free(t0);
6105 #endif /* defined(CONFIG_USER_ONLY) */
6106 }
6107 
/* tlbwe */
static void gen_tlbwe_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    /* All operands come from MAS registers inside env; no GPRs needed. */
    CHK_SV(ctx);
    gen_helper_booke206_tlbwe(cpu_env);
#endif /* defined(CONFIG_USER_ONLY) */
}
6118 
6119 static void gen_tlbivax_booke206(DisasContext *ctx)
6120 {
6121 #if defined(CONFIG_USER_ONLY)
6122     GEN_PRIV(ctx);
6123 #else
6124     TCGv t0;
6125 
6126     CHK_SV(ctx);
6127     t0 = tcg_temp_new();
6128     gen_addr_reg_index(ctx, t0);
6129     gen_helper_booke206_tlbivax(cpu_env, t0);
6130     tcg_temp_free(t0);
6131 #endif /* defined(CONFIG_USER_ONLY) */
6132 }
6133 
/* tlbilx: local TLB invalidate; opcode bits 22:21 select the flavor. */
static void gen_tlbilx_booke206(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    t0 = tcg_temp_new();
    gen_addr_reg_index(ctx, t0);

    /* Flavor 2 is not implemented and raises an invalid-insn exception. */
    switch ((ctx->opcode >> 21) & 0x3) {
    case 0:
        gen_helper_booke206_tlbilx0(cpu_env, t0);
        break;
    case 1:
        gen_helper_booke206_tlbilx1(cpu_env, t0);
        break;
    case 3:
        gen_helper_booke206_tlbilx3(cpu_env, t0);
        break;
    default:
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
        break;
    }

    tcg_temp_free(t0);
#endif /* defined(CONFIG_USER_ONLY) */
}
6163 
6164 
/* wrtee */
static void gen_wrtee(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    TCGv t0;

    CHK_SV(ctx);
    /* Copy only the EE bit from rD into MSR, leaving all other bits. */
    t0 = tcg_temp_new();
    tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
    tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
    tcg_temp_free(t0);
    /*
     * Stop translation to have a chance to raise an exception if we
     * just set msr_ee to 1
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
#endif /* defined(CONFIG_USER_ONLY) */
}
6186 
/* wrteei */
static void gen_wrteei(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    /* The immediate E bit lives in opcode bit 0x00008000. */
    if (ctx->opcode & 0x00008000) {
        tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
        /* Stop translation to have a chance to raise an exception */
        ctx->base.is_jmp = DISAS_EXIT_UPDATE;
    } else {
        tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6203 
6204 /* PowerPC 440 specific instructions */
6205 
6206 /* dlmzb */
6207 static void gen_dlmzb(DisasContext *ctx)
6208 {
6209     TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
6210     gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
6211                      cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
6212     tcg_temp_free_i32(t0);
6213 }
6214 
/* mbar replaces eieio on 440 */
static void gen_mbar(DisasContext *ctx)
{
    /* Memory barrier: no-op under QEMU's memory model here. */
    /* interpreted as no-op */
}
6220 
/* msync replaces sync on 440 */
static void gen_msync_4xx(DisasContext *ctx)
{
    /* Only e500 seems to treat reserved bits as invalid */
    if ((ctx->insns_flags2 & PPC2_BOOKE206) &&
        (ctx->opcode & 0x03FFF801)) {
        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
    }
    /* otherwise interpreted as no-op */
}
6231 
/* icbt */
static void gen_icbt_440(DisasContext *ctx)
{
    /*
     * Instruction cache block touch: interpreted as no-op.
     * XXX: specification say this is treated as a load by the MMU but
     *      does not generate any exception
     */
}
6241 
6242 /* Embedded.Processor Control */
6243 
/* msgclr: clear a pending doorbell message selected by rB. HV privileged. */
static void gen_msgclr(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);
    /* Book3S 2.x and BookE use different helpers for the same insn. */
    if (is_book3s_arch2x(ctx)) {
        gen_helper_book3s_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
    } else {
        gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6257 
/* msgsnd: send a doorbell message selected by rB. HV privileged. */
static void gen_msgsnd(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);
    /* Book3S 2.x and BookE use different helpers for the same insn. */
    if (is_book3s_arch2x(ctx)) {
        gen_helper_book3s_msgsnd(cpu_gpr[rB(ctx->opcode)]);
    } else {
        gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
    }
#endif /* defined(CONFIG_USER_ONLY) */
}
6271 
6272 #if defined(TARGET_PPC64)
/* msgclrp: clear a directed privileged doorbell. Supervisor privileged. */
static void gen_msgclrp(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_book3s_msgclrp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
6282 
/* msgsndp: send a directed privileged doorbell. Supervisor privileged. */
static void gen_msgsndp(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_SV(ctx);
    gen_helper_book3s_msgsndp(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif /* defined(CONFIG_USER_ONLY) */
}
6292 #endif
6293 
/* msgsync: only the HV privilege check is emulated; the sync is a no-op. */
static void gen_msgsync(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
    GEN_PRIV(ctx);
#else
    CHK_HV(ctx);
#endif /* defined(CONFIG_USER_ONLY) */
    /* interpreted as no-op */
}
6303 
6304 #if defined(TARGET_PPC64)
6305 static void gen_maddld(DisasContext *ctx)
6306 {
6307     TCGv_i64 t1 = tcg_temp_new_i64();
6308 
6309     tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
6310     tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
6311     tcg_temp_free_i64(t1);
6312 }
6313 
/* maddhd maddhdu */
static void gen_maddhd_maddhdu(DisasContext *ctx)
{
    TCGv_i64 lo = tcg_temp_new_i64();
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    /* Rc here selects the unsigned variant (maddhdu) vs signed (maddhd):
     * unsigned multiplies and zero-extends rC, signed sign-extends it. */
    if (Rc(ctx->opcode)) {
        tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_movi_i64(t1, 0);
    } else {
        tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
                          cpu_gpr[rB(ctx->opcode)]);
        tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
    }
    /* 128-bit add of (hi:lo) + (t1:rC); rD receives the high doubleword. */
    tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
                     cpu_gpr[rC(ctx->opcode)], t1);
    tcg_temp_free_i64(lo);
    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(t1);
}
6336 #endif /* defined(TARGET_PPC64) */
6337 
/* tbegin: raise facility-unavailable if TM is disabled, else call helper. */
static void gen_tbegin(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    gen_helper_tbegin(cpu_env);
}
6346 
/*
 * GEN_TM_NOOP(name): emit a translator for a TM insn that, given tbegin
 * always fails under QEMU, only needs to clear CR0 (after the TM
 * facility check).
 */
#define GEN_TM_NOOP(name)                                      \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails in QEMU, these user         \
     * space instructions all have a simple implementation:    \
     *                                                         \
     *     CR[0] = 0b0 || MSR[TS] || 0b0                       \
     *           = 0b0 || 0b00    || 0b0                       \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

GEN_TM_NOOP(tend);
GEN_TM_NOOP(tabort);
GEN_TM_NOOP(tabortwc);
GEN_TM_NOOP(tabortwci);
GEN_TM_NOOP(tabortdc);
GEN_TM_NOOP(tabortdci);
GEN_TM_NOOP(tsr);
6371 
/* cp_abort: nothing to abort since copy/paste is not implemented. */
static inline void gen_cp_abort(DisasContext *ctx)
{
    /* Do Nothing */
}
6376 
/* GEN_CP_PASTE_NOOP(name): copy/paste insns raise invalid for now. */
#define GEN_CP_PASTE_NOOP(name)                           \
static inline void gen_##name(DisasContext *ctx)          \
{                                                         \
    /*                                                    \
     * Generate invalid exception until we have an        \
     * implementation of the copy paste facility          \
     */                                                   \
    gen_invalid(ctx);                                     \
}

GEN_CP_PASTE_NOOP(copy)
GEN_CP_PASTE_NOOP(paste)
6389 
/* tcheck: report transaction state in the target CR field. */
static void gen_tcheck(DisasContext *ctx)
{
    if (unlikely(!ctx->tm_enabled)) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
        return;
    }
    /*
     * Because tbegin always fails, the tcheck implementation is
     * simple:
     *
     * CR[CRF] = TDOOMED || MSR[TS] || 0b0
     *         = 0b1 || 0b00 || 0b0
     */
    tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
}
6405 
#if defined(CONFIG_USER_ONLY)
/* In user mode, privileged TM insns always raise a privilege exception. */
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    gen_priv_opc(ctx);                                         \
}

#else

/* System mode: check privilege and TM facility, then just clear CR0. */
#define GEN_TM_PRIV_NOOP(name)                                 \
static inline void gen_##name(DisasContext *ctx)               \
{                                                              \
    CHK_SV(ctx);                                               \
    if (unlikely(!ctx->tm_enabled)) {                          \
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);   \
        return;                                                \
    }                                                          \
    /*                                                         \
     * Because tbegin always fails, the implementation is      \
     * simple:                                                 \
     *                                                         \
     *   CR[0] = 0b0 || MSR[TS] || 0b0                         \
     *         = 0b0 || 0b00 | 0b0                             \
     */                                                        \
    tcg_gen_movi_i32(cpu_crf[0], 0);                           \
}

#endif

GEN_TM_PRIV_NOOP(treclaim);
GEN_TM_PRIV_NOOP(trechkpt);
6437 
/* Load FPR regno (doubleword 0 of the backing VSR) into dst. */
static inline void get_fpr(TCGv_i64 dst, int regno)
{
    tcg_gen_ld_i64(dst, cpu_env, fpr_offset(regno));
}
6442 
/* Store src into FPR regno, zeroing the other half of the backing VSR. */
static inline void set_fpr(int regno, TCGv_i64 src)
{
    tcg_gen_st_i64(src, cpu_env, fpr_offset(regno));
    /*
     * Before PowerISA v3.1 the result of doubleword 1 of the VSR
     * corresponding to the target FPR was undefined. However,
     * most (if not all) real hardware were setting the result to 0.
     * Starting at ISA v3.1, the result for doubleword 1 is now defined
     * to be 0.
     */
    tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false));
}
6455 
/* Load the high or low doubleword of Altivec register regno into dst. */
static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
{
    tcg_gen_ld_i64(dst, cpu_env, avr64_offset(regno, high));
}
6460 
/* Store src into the high or low doubleword of Altivec register regno. */
static inline void set_avr64(int regno, TCGv_i64 src, bool high)
{
    tcg_gen_st_i64(src, cpu_env, avr64_offset(regno, high));
}
6465 
6466 /*
6467  * Helpers for decodetree used by !function for decoding arguments.
6468  */
6469 static int times_2(DisasContext *ctx, int x)
6470 {
6471     return x * 2;
6472 }
6473 
6474 static int times_4(DisasContext *ctx, int x)
6475 {
6476     return x * 4;
6477 }
6478 
6479 static int times_16(DisasContext *ctx, int x)
6480 {
6481     return x * 16;
6482 }
6483 
6484 static int64_t dw_compose_ea(DisasContext *ctx, int x)
6485 {
6486     return deposit64(0xfffffffffffffe00, 3, 6, x);
6487 }
6488 
6489 /*
6490  * Helpers for trans_* functions to check for specific insns flags.
6491  * Use token pasting to ensure that we use the proper flag with the
6492  * proper variable.
6493  */
6494 #define REQUIRE_INSNS_FLAGS(CTX, NAME) \
6495     do {                                                \
6496         if (((CTX)->insns_flags & PPC_##NAME) == 0) {   \
6497             return false;                               \
6498         }                                               \
6499     } while (0)
6500 
6501 #define REQUIRE_INSNS_FLAGS2(CTX, NAME) \
6502     do {                                                \
6503         if (((CTX)->insns_flags2 & PPC2_##NAME) == 0) { \
6504             return false;                               \
6505         }                                               \
6506     } while (0)
6507 
6508 /* Then special-case the check for 64-bit so that we elide code for ppc32. */
6509 #if TARGET_LONG_BITS == 32
6510 # define REQUIRE_64BIT(CTX)  return false
6511 #else
6512 # define REQUIRE_64BIT(CTX)  REQUIRE_INSNS_FLAGS(CTX, 64B)
6513 #endif
6514 
6515 #define REQUIRE_VECTOR(CTX)                             \
6516     do {                                                \
6517         if (unlikely(!(CTX)->altivec_enabled)) {        \
6518             gen_exception((CTX), POWERPC_EXCP_VPU);     \
6519             return true;                                \
6520         }                                               \
6521     } while (0)
6522 
6523 #define REQUIRE_VSX(CTX)                                \
6524     do {                                                \
6525         if (unlikely(!(CTX)->vsx_enabled)) {            \
6526             gen_exception((CTX), POWERPC_EXCP_VSXU);    \
6527             return true;                                \
6528         }                                               \
6529     } while (0)
6530 
6531 #define REQUIRE_FPU(ctx)                                \
6532     do {                                                \
6533         if (unlikely(!(ctx)->fpu_enabled)) {            \
6534             gen_exception((ctx), POWERPC_EXCP_FPU);     \
6535             return true;                                \
6536         }                                               \
6537     } while (0)
6538 
#if !defined(CONFIG_USER_ONLY)
/*
 * Raise a privileged-instruction exception and bail out of the trans_*
 * function unless the CPU is in supervisor (non-problem) state.
 */
#define REQUIRE_SV(CTX)             \
    do {                            \
        if (unlikely((CTX)->pr)) {  \
            gen_priv_opc(CTX);      \
            return true;            \
        }                           \
    } while (0)

/*
 * As REQUIRE_SV, but additionally require hypervisor state.  Note: the
 * if body must be braced; the original was missing the opening brace,
 * leaving the do/while unbalanced and "return true" unconditional.
 */
#define REQUIRE_HV(CTX)                             \
    do {                                            \
        if (unlikely((CTX)->pr || !(CTX)->hv)) {    \
            gen_priv_opc(CTX);                      \
            return true;                            \
        }                                           \
    } while (0)
#else
/* User mode: every privileged insn is illegal from problem state. */
#define REQUIRE_SV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#define REQUIRE_HV(CTX) do { gen_priv_opc(CTX); return true; } while (0)
#endif
6559 
6560 /*
6561  * Helpers for implementing sets of trans_* functions.
6562  * Defer the implementation of NAME to FUNC, with optional extra arguments.
6563  */
6564 #define TRANS(NAME, FUNC, ...) \
6565     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6566     { return FUNC(ctx, a, __VA_ARGS__); }
6567 #define TRANS_FLAGS(FLAGS, NAME, FUNC, ...) \
6568     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6569     {                                                          \
6570         REQUIRE_INSNS_FLAGS(ctx, FLAGS);                       \
6571         return FUNC(ctx, a, __VA_ARGS__);                      \
6572     }
6573 #define TRANS_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6574     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6575     {                                                          \
6576         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6577         return FUNC(ctx, a, __VA_ARGS__);                      \
6578     }
6579 
6580 #define TRANS64(NAME, FUNC, ...) \
6581     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6582     { REQUIRE_64BIT(ctx); return FUNC(ctx, a, __VA_ARGS__); }
6583 #define TRANS64_FLAGS2(FLAGS2, NAME, FUNC, ...) \
6584     static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \
6585     {                                                          \
6586         REQUIRE_64BIT(ctx);                                    \
6587         REQUIRE_INSNS_FLAGS2(ctx, FLAGS2);                     \
6588         return FUNC(ctx, a, __VA_ARGS__);                      \
6589     }
6590 
6591 /* TODO: More TRANS* helpers for extra insn_flags checks. */
6592 
6593 
6594 #include "decode-insn32.c.inc"
6595 #include "decode-insn64.c.inc"
6596 #include "power8-pmu-regs.c.inc"
6597 
6598 /*
6599  * Incorporate CIA into the constant when R=1.
6600  * Validate that when R=1, RA=0.
6601  */
6602 static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a)
6603 {
6604     d->rt = a->rt;
6605     d->ra = a->ra;
6606     d->si = a->si;
6607     if (a->r) {
6608         if (unlikely(a->ra != 0)) {
6609             gen_invalid(ctx);
6610             return false;
6611         }
6612         d->si += ctx->cia;
6613     }
6614     return true;
6615 }
6616 
6617 #include "translate/fixedpoint-impl.c.inc"
6618 
6619 #include "translate/fp-impl.c.inc"
6620 
6621 #include "translate/vmx-impl.c.inc"
6622 
6623 #include "translate/vsx-impl.c.inc"
6624 
6625 #include "translate/dfp-impl.c.inc"
6626 
6627 #include "translate/spe-impl.c.inc"
6628 
6629 #include "translate/branch-impl.c.inc"
6630 
6631 #include "translate/storage-ctrl-impl.c.inc"
6632 
6633 /* Handles lfdp */
6634 static void gen_dform39(DisasContext *ctx)
6635 {
6636     if ((ctx->opcode & 0x3) == 0) {
6637         if (ctx->insns_flags2 & PPC2_ISA205) {
6638             return gen_lfdp(ctx);
6639         }
6640     }
6641     return gen_invalid(ctx);
6642 }
6643 
6644 /* Handles stfdp */
6645 static void gen_dform3D(DisasContext *ctx)
6646 {
6647     if ((ctx->opcode & 3) == 0) { /* DS-FORM */
6648         /* stfdp */
6649         if (ctx->insns_flags2 & PPC2_ISA205) {
6650             return gen_stfdp(ctx);
6651         }
6652     }
6653     return gen_invalid(ctx);
6654 }
6655 
6656 #if defined(TARGET_PPC64)
6657 /* brd */
static void gen_brd(DisasContext *ctx)
{
    /* Byte-reverse the full doubleword of rS into rA. */
    tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
}
6662 
6663 /* brw */
6664 static void gen_brw(DisasContext *ctx)
6665 {
6666     tcg_gen_bswap64_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
6667     tcg_gen_rotli_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32);
6668 
6669 }
6670 
6671 /* brh */
static void gen_brh(DisasContext *ctx)
{
    /* Byte-reverse each halfword of rS into rA by swapping the even
     * and odd byte lanes: mask selects the low byte of each halfword. */
    TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    /* t2 = high byte of each halfword moved down to the low position. */
    tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8);
    tcg_gen_and_i64(t2, t1, mask);
    /* t1 = low byte of each halfword moved up to the high position. */
    tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask);
    tcg_gen_shli_i64(t1, t1, 8);
    tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
6687 #endif
6688 
6689 static opcode_t opcodes[] = {
6690 #if defined(TARGET_PPC64)
6691 GEN_HANDLER_E(brd, 0x1F, 0x1B, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA310),
6692 GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310),
6693 GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310),
6694 #endif
6695 GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
6696 #if defined(TARGET_PPC64)
6697 GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
6698 #endif
6699 GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
6700 GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
6701 GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
6702 GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6703 GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6704 GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
6705 GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
6706 GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
6707 GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
6708 GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6709 #if defined(TARGET_PPC64)
6710 GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
6711 #endif
6712 GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
6713 GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
6714 GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6715 GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6716 GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6717 GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
6718 GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
6719 GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300),
6720 GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6721 GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300),
6722 GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
6723 GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
6724 GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6725 GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6726 GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6727 GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6728 GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
6729 GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
6730 GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
6731 #if defined(TARGET_PPC64)
6732 GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
6733 GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
6734 GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
6735 GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
6736 GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
6737 GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
6738 #endif
6739 GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6740 GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6741 GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6742 GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
6743 GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
6744 GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
6745 GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
6746 #if defined(TARGET_PPC64)
6747 GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
6748 GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
6749 GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
6750 GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
6751 GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
6752 GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
6753                PPC_NONE, PPC2_ISA300),
6754 GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
6755                PPC_NONE, PPC2_ISA300),
6756 #endif
6757 /* handles lfdp, lxsd, lxssp */
6758 GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6759 /* handles stfdp, stxsd, stxssp */
6760 GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205),
6761 GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6762 GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
6763 GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
6764 GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
6765 GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
6766 GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
6767 GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO),
6768 GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
6769 GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6770 GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6771 GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
6772 GEN_HANDLER_E(lwat, 0x1F, 0x06, 0x12, 0x00000001, PPC_NONE, PPC2_ISA300),
6773 GEN_HANDLER_E(stwat, 0x1F, 0x06, 0x16, 0x00000001, PPC_NONE, PPC2_ISA300),
6774 GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6775 GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
6776 GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
6777 #if defined(TARGET_PPC64)
6778 GEN_HANDLER_E(ldat, 0x1F, 0x06, 0x13, 0x00000001, PPC_NONE, PPC2_ISA300),
6779 GEN_HANDLER_E(stdat, 0x1F, 0x06, 0x17, 0x00000001, PPC_NONE, PPC2_ISA300),
6780 GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
6781 GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
6782 GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
6783 GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
6784 #endif
6785 GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
6786 /* ISA v3.0 changed the extended opcode from 62 to 30 */
6787 GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT),
6788 GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300),
6789 GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6790 GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6791 GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
6792 GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
6793 GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
6794 GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
6795 GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
6796 #if defined(TARGET_PPC64)
6797 GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
6798 #if !defined(CONFIG_USER_ONLY)
6799 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6800 GEN_HANDLER_E(scv, 0x11, 0x10, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6801 GEN_HANDLER_E(scv, 0x11, 0x00, 0xFF, 0x03FFF01E, PPC_NONE, PPC2_ISA300),
6802 GEN_HANDLER_E(rfscv, 0x13, 0x12, 0x02, 0x03FF8001, PPC_NONE, PPC2_ISA300),
6803 #endif
6804 GEN_HANDLER_E(stop, 0x13, 0x12, 0x0b, 0x03FFF801, PPC_NONE, PPC2_ISA300),
6805 GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6806 GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6807 GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6808 GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
6809 GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
6810 #endif
6811 /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */
6812 GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW),
6813 GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW),
6814 GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
6815 GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
6816 #if defined(TARGET_PPC64)
6817 GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
6818 GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
6819 #endif
6820 GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
6821 GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
6822 GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
6823 GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
6824 GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
6825 GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
6826 #if defined(TARGET_PPC64)
6827 GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
6828 GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
6829 GEN_HANDLER_E(mcrxrx, 0x1F, 0x00, 0x12, 0x007FF801, PPC_NONE, PPC2_ISA300),
6830 #endif
6831 GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
6832 GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
6833 GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
6834 GEN_HANDLER_E(dcbfep, 0x1F, 0x1F, 0x03, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6835 GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
6836 GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
6837 GEN_HANDLER_E(dcbstep, 0x1F, 0x1F, 0x01, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6838 GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
6839 GEN_HANDLER_E(dcbtep, 0x1F, 0x1F, 0x09, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6840 GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
6841 GEN_HANDLER_E(dcbtstep, 0x1F, 0x1F, 0x07, 0x00000001, PPC_NONE, PPC2_BOOKE206),
6842 GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
6843 GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
6844 GEN_HANDLER_E(dcbzep, 0x1F, 0x1F, 0x1F, 0x03C00001, PPC_NONE, PPC2_BOOKE206),
6845 GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
6846 GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x01800001, PPC_ALTIVEC),
6847 GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
6848 GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
6849 GEN_HANDLER_E(icbiep, 0x1F, 0x1F, 0x1E, 0x03E00001, PPC_NONE, PPC2_BOOKE206),
6850 GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
6851 GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
6852 GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
6853 GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
6854 GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
6855 #if defined(TARGET_PPC64)
6856 GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
6857 GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
6858              PPC_SEGMENT_64B),
6859 GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
6860 GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
6861              PPC_SEGMENT_64B),
6862 #endif
6863 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
6864 /*
6865  * XXX Those instructions will need to be handled differently for
6866  * different ISA versions
6867  */
6868 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
6869 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
6870 GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
6871 GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
6872 GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
6873 GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
6874 GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
6875 GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
6876 GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
6877 GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
6878 GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
6879 GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
6880 GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
6881 GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
6882 GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
6883 GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
6884 GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
6885 GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
6886 GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
6887 GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
6888 GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
6889 GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
6890 GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
6891 GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
6892 GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
6893 GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
6894 GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
6895                PPC_NONE, PPC2_BOOKE206),
6896 GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
6897                PPC_NONE, PPC2_BOOKE206),
6898 GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
6899                PPC_NONE, PPC2_BOOKE206),
6900 GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
6901                PPC_NONE, PPC2_BOOKE206),
6902 GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
6903                PPC_NONE, PPC2_BOOKE206),
6904 GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001,
6905                PPC_NONE, PPC2_PRCNTL),
6906 GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001,
6907                PPC_NONE, PPC2_PRCNTL),
6908 GEN_HANDLER2_E(msgsync, "msgsync", 0x1F, 0x16, 0x1B, 0x00000000,
6909                PPC_NONE, PPC2_PRCNTL),
6910 GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
6911 GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
6912 GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
6913 GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
6914               PPC_BOOKE, PPC2_BOOKE206),
6915 GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE),
6916 GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
6917                PPC_BOOKE, PPC2_BOOKE206),
6918 GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001,
6919              PPC_440_SPEC),
6920 GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
6921 GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
6922 GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
6923 GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
6924 GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
6925 #if defined(TARGET_PPC64)
6926 GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
6927               PPC2_ISA300),
6928 GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
6929 GEN_HANDLER2_E(msgsndp, "msgsndp", 0x1F, 0x0E, 0x04, 0x03ff0001,
6930                PPC_NONE, PPC2_ISA207S),
6931 GEN_HANDLER2_E(msgclrp, "msgclrp", 0x1F, 0x0E, 0x05, 0x03ff0001,
6932                PPC_NONE, PPC2_ISA207S),
6933 #endif
6934 
6935 #undef GEN_INT_ARITH_ADD
6936 #undef GEN_INT_ARITH_ADD_CONST
6937 #define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov)         \
6938 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
6939 #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val,                        \
6940                                 add_ca, compute_ca, compute_ov)               \
6941 GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
6942 GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
6943 GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
6944 GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
6945 GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
6946 GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
6947 GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
6948 GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
6949 GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
6950 GEN_HANDLER_E(addex, 0x1F, 0x0A, 0x05, 0x00000000, PPC_NONE, PPC2_ISA300),
6951 GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
6952 GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
6953 
6954 #undef GEN_INT_ARITH_DIVW
6955 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov)                      \
6956 GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
6957 GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
6958 GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
6959 GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
6960 GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
6961 GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6962 GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6963 GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6964 GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6965 GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6966 GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6967 
6968 #if defined(TARGET_PPC64)
6969 #undef GEN_INT_ARITH_DIVD
6970 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov)                      \
6971 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6972 GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
6973 GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
6974 GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
6975 GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
6976 
6977 GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6978 GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
6979 GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6980 GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
6981 GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
6982 GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
6983 
6984 #undef GEN_INT_ARITH_MUL_HELPER
6985 #define GEN_INT_ARITH_MUL_HELPER(name, opc3)                                  \
6986 GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
6987 GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
6988 GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
6989 GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
6990 #endif
6991 
6992 #undef GEN_INT_ARITH_SUBF
6993 #undef GEN_INT_ARITH_SUBF_CONST
6994 #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov)        \
6995 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
6996 #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val,                       \
6997                                 add_ca, compute_ca, compute_ov)               \
6998 GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
6999 GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
7000 GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
7001 GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
7002 GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
7003 GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
7004 GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
7005 GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
7006 GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
7007 GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
7008 GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
7009 
7010 #undef GEN_LOGICAL1
7011 #undef GEN_LOGICAL2
7012 #define GEN_LOGICAL2(name, tcg_op, opc, type)                                 \
7013 GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
7014 #define GEN_LOGICAL1(name, tcg_op, opc, type)                                 \
7015 GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
7016 GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
7017 GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
7018 GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
7019 GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
7020 GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
7021 GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
7022 GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
7023 GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
7024 #if defined(TARGET_PPC64)
7025 GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
7026 #endif
7027 
7028 #if defined(TARGET_PPC64)
7029 #undef GEN_PPC64_R2
7030 #undef GEN_PPC64_R4
7031 #define GEN_PPC64_R2(name, opc1, opc2)                                        \
7032 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
7033 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
7034              PPC_64B)
7035 #define GEN_PPC64_R4(name, opc1, opc2)                                        \
7036 GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
7037 GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000,   \
7038              PPC_64B),                                                        \
7039 GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000,   \
7040              PPC_64B),                                                        \
7041 GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000,   \
7042              PPC_64B)
7043 GEN_PPC64_R4(rldicl, 0x1E, 0x00),
7044 GEN_PPC64_R4(rldicr, 0x1E, 0x02),
7045 GEN_PPC64_R4(rldic, 0x1E, 0x04),
7046 GEN_PPC64_R2(rldcl, 0x1E, 0x08),
7047 GEN_PPC64_R2(rldcr, 0x1E, 0x09),
7048 GEN_PPC64_R4(rldimi, 0x1E, 0x06),
7049 #endif
7050 
7051 #undef GEN_LDX_E
7052 #define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk)                   \
7053 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
7054 
7055 #if defined(TARGET_PPC64)
7056 GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
7057 
7058 /* HV/P7 and later only */
7059 GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
7060 GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
7061 GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
7062 GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
7063 #endif
7064 GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
7065 GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
7066 
7067 /* External PID based load */
7068 #undef GEN_LDEPX
7069 #define GEN_LDEPX(name, ldop, opc2, opc3)                                     \
7070 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
7071               0x00000001, PPC_NONE, PPC2_BOOKE206),
7072 
7073 GEN_LDEPX(lb, DEF_MEMOP(MO_UB), 0x1F, 0x02)
7074 GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
7075 GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
7076 #if defined(TARGET_PPC64)
7077 GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
7078 #endif
7079 
7080 #undef GEN_STX_E
7081 #define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk)                   \
7082 GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000000, type, type2),
7083 
7084 #if defined(TARGET_PPC64)
7085 GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
7086 GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
7087 GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
7088 GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
7089 GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
7090 #endif
7091 GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
7092 GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
7093 
7094 #undef GEN_STEPX
7095 #define GEN_STEPX(name, ldop, opc2, opc3)                                     \
7096 GEN_HANDLER_E(name##epx, 0x1F, opc2, opc3,                                    \
7097               0x00000001, PPC_NONE, PPC2_BOOKE206),
7098 
7099 GEN_STEPX(stb, DEF_MEMOP(MO_UB), 0x1F, 0x06)
7100 GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
7101 GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
7102 #if defined(TARGET_PPC64)
7103 GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
7104 #endif
7105 
7106 #undef GEN_CRLOGIC
7107 #define GEN_CRLOGIC(name, tcg_op, opc)                                        \
7108 GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
7109 GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
7110 GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
7111 GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
7112 GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
7113 GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
7114 GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
7115 GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
7116 GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
7117 
7118 #undef GEN_MAC_HANDLER
7119 #define GEN_MAC_HANDLER(name, opc2, opc3)                                     \
7120 GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
7121 GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
7122 GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
7123 GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
7124 GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
7125 GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
7126 GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
7127 GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
7128 GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
7129 GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
7130 GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
7131 GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
7132 GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
7133 GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
7134 GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
7135 GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
7136 GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
7137 GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
7138 GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
7139 GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
7140 GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
7141 GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
7142 GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
7143 GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
7144 GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
7145 GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
7146 GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
7147 GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
7148 GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
7149 GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
7150 GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
7151 GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
7152 GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
7153 GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
7154 GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
7155 GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
7156 GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
7157 GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
7158 GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
7159 GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
7160 GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
7161 GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
7162 GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
7163 
7164 GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
7165                PPC_NONE, PPC2_TM),
7166 GEN_HANDLER2_E(tend,   "tend",   0x1F, 0x0E, 0x15, 0x01FFF800, \
7167                PPC_NONE, PPC2_TM),
7168 GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
7169                PPC_NONE, PPC2_TM),
7170 GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
7171                PPC_NONE, PPC2_TM),
7172 GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
7173                PPC_NONE, PPC2_TM),
7174 GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
7175                PPC_NONE, PPC2_TM),
7176 GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
7177                PPC_NONE, PPC2_TM),
7178 GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
7179                PPC_NONE, PPC2_TM),
7180 GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
7181                PPC_NONE, PPC2_TM),
7182 GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
7183                PPC_NONE, PPC2_TM),
7184 GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
7185                PPC_NONE, PPC2_TM),
7186 
7187 #include "translate/fp-ops.c.inc"
7188 
7189 #include "translate/vmx-ops.c.inc"
7190 
7191 #include "translate/vsx-ops.c.inc"
7192 
7193 #include "translate/spe-ops.c.inc"
7194 };
7195 
7196 /*****************************************************************************/
7197 /* Opcode types */
/*
 * Opcode-table entry tagging: the low bits of an opc_handler_t pointer
 * stored in a dispatch table encode whether the entry is a direct
 * handler or a (tagged) pointer to a sub-table.  See
 * is_indirect_opcode() and ind_table() below.
 */
enum {
    PPC_DIRECT   = 0, /* Opcode routine        */
    PPC_INDIRECT = 1, /* Indirect opcode table */
};

/* Mask covering the tag bits (table allocations are suitably aligned). */
#define PPC_OPCODE_MASK 0x3
7204 
7205 static inline int is_indirect_opcode(void *handler)
7206 {
7207     return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
7208 }
7209 
7210 static inline opc_handler_t **ind_table(void *handler)
7211 {
7212     return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
7213 }
7214 
7215 /* Instruction table creation */
7216 /* Opcodes tables creation */
7217 static void fill_new_table(opc_handler_t **table, int len)
7218 {
7219     int i;
7220 
7221     for (i = 0; i < len; i++) {
7222         table[i] = &invalid_handler;
7223     }
7224 }
7225 
7226 static int create_new_table(opc_handler_t **table, unsigned char idx)
7227 {
7228     opc_handler_t **tmp;
7229 
7230     tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
7231     fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
7232     table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
7233 
7234     return 0;
7235 }
7236 
7237 static int insert_in_table(opc_handler_t **table, unsigned char idx,
7238                             opc_handler_t *handler)
7239 {
7240     if (table[idx] != &invalid_handler) {
7241         return -1;
7242     }
7243     table[idx] = handler;
7244 
7245     return 0;
7246 }
7247 
7248 static int register_direct_insn(opc_handler_t **ppc_opcodes,
7249                                 unsigned char idx, opc_handler_t *handler)
7250 {
7251     if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
7252         printf("*** ERROR: opcode %02x already assigned in main "
7253                "opcode table\n", idx);
7254         return -1;
7255     }
7256 
7257     return 0;
7258 }
7259 
7260 static int register_ind_in_table(opc_handler_t **table,
7261                                  unsigned char idx1, unsigned char idx2,
7262                                  opc_handler_t *handler)
7263 {
7264     if (table[idx1] == &invalid_handler) {
7265         if (create_new_table(table, idx1) < 0) {
7266             printf("*** ERROR: unable to create indirect table "
7267                    "idx=%02x\n", idx1);
7268             return -1;
7269         }
7270     } else {
7271         if (!is_indirect_opcode(table[idx1])) {
7272             printf("*** ERROR: idx %02x already assigned to a direct "
7273                    "opcode\n", idx1);
7274             return -1;
7275         }
7276     }
7277     if (handler != NULL &&
7278         insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
7279         printf("*** ERROR: opcode %02x already assigned in "
7280                "opcode table %02x\n", idx2, idx1);
7281         return -1;
7282     }
7283 
7284     return 0;
7285 }
7286 
/* Register a two-level opcode (opc1 -> opc2); thin convenience wrapper. */
static int register_ind_insn(opc_handler_t **ppc_opcodes,
                             unsigned char idx1, unsigned char idx2,
                             opc_handler_t *handler)
{
    return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
7293 
7294 static int register_dblind_insn(opc_handler_t **ppc_opcodes,
7295                                 unsigned char idx1, unsigned char idx2,
7296                                 unsigned char idx3, opc_handler_t *handler)
7297 {
7298     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7299         printf("*** ERROR: unable to join indirect table idx "
7300                "[%02x-%02x]\n", idx1, idx2);
7301         return -1;
7302     }
7303     if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
7304                               handler) < 0) {
7305         printf("*** ERROR: unable to insert opcode "
7306                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7307         return -1;
7308     }
7309 
7310     return 0;
7311 }
7312 
7313 static int register_trplind_insn(opc_handler_t **ppc_opcodes,
7314                                  unsigned char idx1, unsigned char idx2,
7315                                  unsigned char idx3, unsigned char idx4,
7316                                  opc_handler_t *handler)
7317 {
7318     opc_handler_t **table;
7319 
7320     if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
7321         printf("*** ERROR: unable to join indirect table idx "
7322                "[%02x-%02x]\n", idx1, idx2);
7323         return -1;
7324     }
7325     table = ind_table(ppc_opcodes[idx1]);
7326     if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
7327         printf("*** ERROR: unable to join 2nd-level indirect table idx "
7328                "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
7329         return -1;
7330     }
7331     table = ind_table(table[idx2]);
7332     if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
7333         printf("*** ERROR: unable to insert opcode "
7334                "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
7335         return -1;
7336     }
7337     return 0;
7338 }
7339 static int register_insn(opc_handler_t **ppc_opcodes, opcode_t *insn)
7340 {
7341     if (insn->opc2 != 0xFF) {
7342         if (insn->opc3 != 0xFF) {
7343             if (insn->opc4 != 0xFF) {
7344                 if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7345                                           insn->opc3, insn->opc4,
7346                                           &insn->handler) < 0) {
7347                     return -1;
7348                 }
7349             } else {
7350                 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
7351                                          insn->opc3, &insn->handler) < 0) {
7352                     return -1;
7353                 }
7354             }
7355         } else {
7356             if (register_ind_insn(ppc_opcodes, insn->opc1,
7357                                   insn->opc2, &insn->handler) < 0) {
7358                 return -1;
7359             }
7360         }
7361     } else {
7362         if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) {
7363             return -1;
7364         }
7365     }
7366 
7367     return 0;
7368 }
7369 
7370 static int test_opcode_table(opc_handler_t **table, int len)
7371 {
7372     int i, count, tmp;
7373 
7374     for (i = 0, count = 0; i < len; i++) {
7375         /* Consistency fixup */
7376         if (table[i] == NULL) {
7377             table[i] = &invalid_handler;
7378         }
7379         if (table[i] != &invalid_handler) {
7380             if (is_indirect_opcode(table[i])) {
7381                 tmp = test_opcode_table(ind_table(table[i]),
7382                     PPC_CPU_INDIRECT_OPCODES_LEN);
7383                 if (tmp == 0) {
7384                     free(table[i]);
7385                     table[i] = &invalid_handler;
7386                 } else {
7387                     count++;
7388                 }
7389             } else {
7390                 count++;
7391             }
7392         }
7393     }
7394 
7395     return count;
7396 }
7397 
7398 static void fix_opcode_tables(opc_handler_t **ppc_opcodes)
7399 {
7400     if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) {
7401         printf("*** WARNING: no opcode defined !\n");
7402     }
7403 }
7404 
7405 /*****************************************************************************/
7406 void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
7407 {
7408     PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
7409     opcode_t *opc;
7410 
7411     fill_new_table(cpu->opcodes, PPC_CPU_OPCODES_LEN);
7412     for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
7413         if (((opc->handler.type & pcc->insns_flags) != 0) ||
7414             ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
7415             if (register_insn(cpu->opcodes, opc) < 0) {
7416                 error_setg(errp, "ERROR initializing PowerPC instruction "
7417                            "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
7418                            opc->opc3);
7419                 return;
7420             }
7421         }
7422     }
7423     fix_opcode_tables(cpu->opcodes);
7424     fflush(stdout);
7425     fflush(stderr);
7426 }
7427 
7428 void destroy_ppc_opcodes(PowerPCCPU *cpu)
7429 {
7430     opc_handler_t **table, **table_2;
7431     int i, j, k;
7432 
7433     for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
7434         if (cpu->opcodes[i] == &invalid_handler) {
7435             continue;
7436         }
7437         if (is_indirect_opcode(cpu->opcodes[i])) {
7438             table = ind_table(cpu->opcodes[i]);
7439             for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
7440                 if (table[j] == &invalid_handler) {
7441                     continue;
7442                 }
7443                 if (is_indirect_opcode(table[j])) {
7444                     table_2 = ind_table(table[j]);
7445                     for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
7446                         if (table_2[k] != &invalid_handler &&
7447                             is_indirect_opcode(table_2[k])) {
7448                             g_free((opc_handler_t *)((uintptr_t)table_2[k] &
7449                                                      ~PPC_INDIRECT));
7450                         }
7451                     }
7452                     g_free((opc_handler_t *)((uintptr_t)table[j] &
7453                                              ~PPC_INDIRECT));
7454                 }
7455             }
7456             g_free((opc_handler_t *)((uintptr_t)cpu->opcodes[i] &
7457                 ~PPC_INDIRECT));
7458         }
7459     }
7460 }
7461 
7462 int ppc_fixup_cpu(PowerPCCPU *cpu)
7463 {
7464     CPUPPCState *env = &cpu->env;
7465 
7466     /*
7467      * TCG doesn't (yet) emulate some groups of instructions that are
7468      * implemented on some otherwise supported CPUs (e.g. VSX and
7469      * decimal floating point instructions on POWER7).  We remove
7470      * unsupported instruction groups from the cpu state's instruction
7471      * masks and hope the guest can cope.  For at least the pseries
7472      * machine, the unavailability of these instructions can be
7473      * advertised to the guest via the device tree.
7474      */
7475     if ((env->insns_flags & ~PPC_TCG_INSNS)
7476         || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
7477         warn_report("Disabling some instructions which are not "
7478                     "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")",
7479                     env->insns_flags & ~PPC_TCG_INSNS,
7480                     env->insns_flags2 & ~PPC_TCG_INSNS2);
7481     }
7482     env->insns_flags &= PPC_TCG_INSNS;
7483     env->insns_flags2 &= PPC_TCG_INSNS2;
7484     return 0;
7485 }
7486 
/*
 * Look up @insn in the CPU's legacy opcode tables (up to four levels:
 * opc1 -> opc2 -> opc3 -> opc4) and, if it resolves to a valid handler
 * whose reserved bits are all clear, emit TCG code for it.
 * Returns false when the opcode is invalid/unsupported or has reserved
 * bits set, so the caller can raise a program interrupt.
 */
static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn)
{
    opc_handler_t **table, *handler;
    uint32_t inval;

    ctx->opcode = insn;

    LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
              insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn),
              ctx->le_mode ? "little" : "big");

    /* Walk the table levels until a direct (untagged) handler is found. */
    table = cpu->opcodes;
    handler = table[opc1(insn)];
    if (is_indirect_opcode(handler)) {
        table = ind_table(handler);
        handler = table[opc2(insn)];
        if (is_indirect_opcode(handler)) {
            table = ind_table(handler);
            handler = table[opc3(insn)];
            if (is_indirect_opcode(handler)) {
                table = ind_table(handler);
                handler = table[opc4(insn)];
            }
        }
    }

    /* Is opcode *REALLY* valid ? */
    if (unlikely(handler->handler == &gen_invalid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n",
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    /* SPE instructions use a second invalid-bits mask when Rc is set. */
    if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE)
                 && Rc(insn))) {
        inval = handler->inval2;
    } else {
        inval = handler->inval1;
    }

    /* Reject encodings with reserved ("invalid") bits set. */
    if (unlikely((insn & inval) != 0)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
                      "%02x - %02x - %02x - %02x (%08x) "
                      TARGET_FMT_lx "\n", insn & inval,
                      opc1(insn), opc2(insn), opc3(insn), opc4(insn),
                      insn, ctx->cia);
        return false;
    }

    handler->handler(ctx);
    return true;
}
7542 
/*
 * Unpack the translation-relevant CPU state (mostly the hflags encoded
 * in tb->flags) into the DisasContext consulted by the per-insn
 * translators.
 */
static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUPPCState *env = cs->env_ptr;
    uint32_t hflags = ctx->base.tb->flags;

    ctx->spr_cb = env->spr_cb;
    ctx->pr = (hflags >> HFLAGS_PR) & 1;
    ctx->mem_idx = (hflags >> HFLAGS_DMMU_IDX) & 7;
    ctx->dr = (hflags >> HFLAGS_DR) & 1;
    ctx->hv = (hflags >> HFLAGS_HV) & 1;
    ctx->insns_flags = env->insns_flags;
    ctx->insns_flags2 = env->insns_flags2;
    ctx->access_type = -1;
    ctx->need_access_type = !mmu_is_64bit(env->mmu_model);
    ctx->le_mode = (hflags >> HFLAGS_LE) & 1;
    ctx->default_tcg_memop_mask = ctx->le_mode ? MO_LE : MO_BE;
    ctx->flags = env->flags;
#if defined(TARGET_PPC64)
    ctx->sf_mode = (hflags >> HFLAGS_64) & 1;
    ctx->has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
#endif
    /* Only the 32B and 64-bit MMU models may defer TLB flushes. */
    ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B
        || env->mmu_model & POWERPC_MMU_64;

    ctx->fpu_enabled = (hflags >> HFLAGS_FP) & 1;
    ctx->spe_enabled = (hflags >> HFLAGS_SPE) & 1;
    ctx->altivec_enabled = (hflags >> HFLAGS_VR) & 1;
    ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1;
    ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1;
    ctx->gtse = (hflags >> HFLAGS_GTSE) & 1;
    ctx->hr = (hflags >> HFLAGS_HR) & 1;
    ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1;
    ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1;
    ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;

    /* When single-stepping, translate exactly one insn per TB. */
    ctx->singlestep_enabled = 0;
    if ((hflags >> HFLAGS_SE) & 1) {
        ctx->singlestep_enabled |= CPU_SINGLE_STEP;
        ctx->base.max_insns = 1;
    }
    if ((hflags >> HFLAGS_BE) & 1) {
        ctx->singlestep_enabled |= CPU_BRANCH_STEP;
    }
}
7588 
/* No per-TB setup is required for PPC. */
static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
7592 
/* Record the insn's pc so restore_state_to_opc() can recover it. */
static void ppc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
7597 
/*
 * True if @insn is a Power ISA v3.1 prefixed instruction (primary
 * opcode 1).  NOTE: REQUIRE_INSNS_FLAGS2 is a macro that returns early
 * from this function when the CPU lacks ISA310 support, so pre-v3.1
 * CPUs never treat any word as a prefix — confirm against the macro's
 * definition if this is changed.
 */
static bool is_prefix_insn(DisasContext *ctx, uint32_t insn)
{
    REQUIRE_INSNS_FLAGS2(ctx, ISA310);
    return opc1(insn) == 1;
}
7603 
/*
 * Fetch and translate one (possibly prefixed, i.e. 8-byte) instruction.
 * Tries the decodetree decoders first, then the legacy opcode tables;
 * if nothing accepts the encoding, emits an illegal-instruction trap.
 */
static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = cs->env_ptr;
    target_ulong pc;
    uint32_t insn;
    bool ok;

    LOG_DISAS("----------------\n");
    LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
              ctx->base.pc_next, ctx->mem_idx, (int)msr_ir);

    /* ctx->cia keeps the address of the insn being translated. */
    ctx->cia = pc = ctx->base.pc_next;
    insn = translator_ldl_swap(env, dcbase, pc, need_byteswap(ctx));
    ctx->base.pc_next = pc += 4;

    if (!is_prefix_insn(ctx, insn)) {
        ok = (decode_insn32(ctx, insn) ||
              decode_legacy(cpu, ctx, insn));
    } else if ((pc & 63) == 0) {
        /*
         * Power v3.1, section 1.9 Exceptions:
         * attempt to execute a prefixed instruction that crosses a
         * 64-byte address boundary (system alignment error).
         * (pc already points past the prefix word here, so pc % 64 == 0
         * means the prefix sat in the last word of a 64-byte block.)
         */
        gen_exception_err(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_INSN);
        ok = true;
    } else {
        /* Prefixed insn: fetch the suffix word and decode all 64 bits. */
        uint32_t insn2 = translator_ldl_swap(env, dcbase, pc,
                                             need_byteswap(ctx));
        ctx->base.pc_next = pc += 4;
        ok = decode_insn64(ctx, deposit64(insn2, 32, 32, insn));
    }
    if (!ok) {
        gen_invalid(ctx);
    }

    /* End the TB when crossing a page boundary. */
    if (ctx->base.is_jmp == DISAS_NEXT && !(pc & ~TARGET_PAGE_MASK)) {
        ctx->base.is_jmp = DISAS_TOO_MANY;
    }

    translator_loop_temp_check(&ctx->base);
}
7649 
7650 static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
7651 {
7652     DisasContext *ctx = container_of(dcbase, DisasContext, base);
7653     DisasJumpType is_jmp = ctx->base.is_jmp;
7654     target_ulong nip = ctx->base.pc_next;
7655 
7656     if (is_jmp == DISAS_NORETURN) {
7657         /* We have already exited the TB. */
7658         return;
7659     }
7660 
7661     /* Honor single stepping. */
7662     if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP)
7663         && (nip <= 0x100 || nip > 0xf00)) {
7664         switch (is_jmp) {
7665         case DISAS_TOO_MANY:
7666         case DISAS_EXIT_UPDATE:
7667         case DISAS_CHAIN_UPDATE:
7668             gen_update_nip(ctx, nip);
7669             break;
7670         case DISAS_EXIT:
7671         case DISAS_CHAIN:
7672             break;
7673         default:
7674             g_assert_not_reached();
7675         }
7676 
7677         gen_debug_exception(ctx);
7678         return;
7679     }
7680 
7681     switch (is_jmp) {
7682     case DISAS_TOO_MANY:
7683         if (use_goto_tb(ctx, nip)) {
7684             pmu_count_insns(ctx);
7685             tcg_gen_goto_tb(0);
7686             gen_update_nip(ctx, nip);
7687             tcg_gen_exit_tb(ctx->base.tb, 0);
7688             break;
7689         }
7690         /* fall through */
7691     case DISAS_CHAIN_UPDATE:
7692         gen_update_nip(ctx, nip);
7693         /* fall through */
7694     case DISAS_CHAIN:
7695         /*
7696          * tcg_gen_lookup_and_goto_ptr will exit the TB if
7697          * CF_NO_GOTO_PTR is set. Count insns now.
7698          */
7699         if (ctx->base.tb->flags & CF_NO_GOTO_PTR) {
7700             pmu_count_insns(ctx);
7701         }
7702 
7703         tcg_gen_lookup_and_goto_ptr();
7704         break;
7705 
7706     case DISAS_EXIT_UPDATE:
7707         gen_update_nip(ctx, nip);
7708         /* fall through */
7709     case DISAS_EXIT:
7710         pmu_count_insns(ctx);
7711         tcg_gen_exit_tb(NULL, 0);
7712         break;
7713 
7714     default:
7715         g_assert_not_reached();
7716     }
7717 }
7718 
/* Dump the guest instructions of this TB to @logfile (-d in_asm). */
static void ppc_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
7725 
/* Hooks consumed by the generic translator_loop() driver. */
static const TranslatorOps ppc_tr_ops = {
    .init_disas_context = ppc_tr_init_disas_context,
    .tb_start           = ppc_tr_tb_start,
    .insn_start         = ppc_tr_insn_start,
    .translate_insn     = ppc_tr_translate_insn,
    .tb_stop            = ppc_tr_tb_stop,
    .disas_log          = ppc_tr_disas_log,
};
7734 
/* Entry point from the TCG core: translate one TB starting at @pc. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &ppc_tr_ops, &ctx.base);
}
7742 
/*
 * Restore CPU state after an exception inside a TB: data[0] is the pc
 * recorded by tcg_gen_insn_start() in ppc_tr_insn_start().
 */
void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->nip = data[0];
}
7748