xref: /openbmc/linux/arch/powerpc/lib/sstep.c (revision e953aeaa)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Single-step support.
4  *
5  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6  */
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
17 
18 extern char system_call_common[];
19 extern char system_call_vectored_emulate[];
20 
21 #ifdef CONFIG_PPC64
22 /* Bits in SRR1 that are copied from MSR */
23 #define MSR_MASK	0xffffffff87c0ffffUL
24 #else
25 #define MSR_MASK	0x87c0ffff
26 #endif
27 
28 /* Bits in XER */
29 #define XER_SO		0x80000000U
30 #define XER_OV		0x40000000U
31 #define XER_CA		0x20000000U
32 #define XER_OV32	0x00080000U
33 #define XER_CA32	0x00040000U
34 
35 #ifdef CONFIG_PPC_FPU
36 /*
37  * Functions in ldstfp.S
38  */
39 extern void get_fpr(int rn, double *p);
40 extern void put_fpr(int rn, const double *p);
41 extern void get_vr(int rn, __vector128 *p);
42 extern void put_vr(int rn, __vector128 *p);
43 extern void load_vsrn(int vsr, const void *p);
44 extern void store_vsrn(int vsr, void *p);
45 extern void conv_sp_to_dp(const float *sp, double *dp);
46 extern void conv_dp_to_sp(const double *dp, float *sp);
47 #endif
48 
49 #ifdef __powerpc64__
50 /*
51  * Functions in quad.S
52  */
53 extern int do_lq(unsigned long ea, unsigned long *regs);
54 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
55 extern int do_lqarx(unsigned long ea, unsigned long *regs);
56 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
57 		    unsigned int *crp);
58 #endif
59 
60 #ifdef __LITTLE_ENDIAN__
61 #define IS_LE	1
62 #define IS_BE	0
63 #else
64 #define IS_LE	0
65 #define IS_BE	1
66 #endif
67 
68 /*
69  * Emulate the truncation of 64 bit values in 32-bit mode.
70  */
71 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
72 							unsigned long val)
73 {
74 #ifdef __powerpc64__
75 	if ((msr & MSR_64BIT) == 0)
76 		val &= 0xffffffffUL;
77 #endif
78 	return val;
79 }
80 
81 /*
82  * Determine whether a conditional branch instruction would branch.
83  */
84 static nokprobe_inline int branch_taken(unsigned int instr,
85 					const struct pt_regs *regs,
86 					struct instruction_op *op)
87 {
88 	unsigned int bo = (instr >> 21) & 0x1f;
89 	unsigned int bi;
90 
91 	if ((bo & 4) == 0) {
92 		/* decrement counter */
93 		op->type |= DECCTR;
94 		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
95 			return 0;
96 	}
97 	if ((bo & 0x10) == 0) {
98 		/* check bit from CR */
99 		bi = (instr >> 16) & 0x1f;
100 		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
101 			return 0;
102 	}
103 	return 1;
104 }
105 
/*
 * Check that a user-mode access of nb bytes at ea is permitted.
 * Kernel-mode accesses are always allowed.  Returns 1 if OK; on
 * failure returns 0 and records a faulting address in regs->dar.
 */
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb, USER_DS))
		return 1;
	if (__access_ok(ea, 1, USER_DS))
		/* Access overlaps the end of the user region */
		regs->dar = USER_DS.seg;
	else
		regs->dar = ea;
	return 0;
}
120 
121 /*
122  * Calculate effective address for a D-form instruction
123  */
124 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
125 					      const struct pt_regs *regs)
126 {
127 	int ra;
128 	unsigned long ea;
129 
130 	ra = (instr >> 16) & 0x1f;
131 	ea = (signed short) instr;		/* sign-extend */
132 	if (ra)
133 		ea += regs->gpr[ra];
134 
135 	return ea;
136 }
137 
138 #ifdef __powerpc64__
139 /*
140  * Calculate effective address for a DS-form instruction
141  */
142 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
143 					       const struct pt_regs *regs)
144 {
145 	int ra;
146 	unsigned long ea;
147 
148 	ra = (instr >> 16) & 0x1f;
149 	ea = (signed short) (instr & ~3);	/* sign-extend */
150 	if (ra)
151 		ea += regs->gpr[ra];
152 
153 	return ea;
154 }
155 
156 /*
157  * Calculate effective address for a DQ-form instruction
158  */
159 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
160 					       const struct pt_regs *regs)
161 {
162 	int ra;
163 	unsigned long ea;
164 
165 	ra = (instr >> 16) & 0x1f;
166 	ea = (signed short) (instr & ~0xf);	/* sign-extend */
167 	if (ra)
168 		ea += regs->gpr[ra];
169 
170 	return ea;
171 }
172 #endif /* __powerpc64 */
173 
174 /*
175  * Calculate effective address for an X-form instruction
176  */
177 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
178 					      const struct pt_regs *regs)
179 {
180 	int ra, rb;
181 	unsigned long ea;
182 
183 	ra = (instr >> 16) & 0x1f;
184 	rb = (instr >> 11) & 0x1f;
185 	ea = regs->gpr[rb];
186 	if (ra)
187 		ea += regs->gpr[ra];
188 
189 	return ea;
190 }
191 
192 /*
193  * Calculate effective address for a MLS:D-form / 8LS:D-form
194  * prefixed instruction
195  */
196 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
197 						  unsigned int suffix,
198 						  const struct pt_regs *regs)
199 {
200 	int ra, prefix_r;
201 	unsigned int  dd;
202 	unsigned long ea, d0, d1, d;
203 
204 	prefix_r = GET_PREFIX_R(instr);
205 	ra = GET_PREFIX_RA(suffix);
206 
207 	d0 = instr & 0x3ffff;
208 	d1 = suffix & 0xffff;
209 	d = (d0 << 16) | d1;
210 
211 	/*
212 	 * sign extend a 34 bit number
213 	 */
214 	dd = (unsigned int)(d >> 2);
215 	ea = (signed int)dd;
216 	ea = (ea << 2) | (d & 0x3);
217 
218 	if (!prefix_r && ra)
219 		ea += regs->gpr[ra];
220 	else if (!prefix_r && !ra)
221 		; /* Leave ea as is */
222 	else if (prefix_r && !ra)
223 		ea += regs->nip;
224 	else if (prefix_r && ra)
225 		; /* Invalid form. Should already be checked for by caller! */
226 
227 	return ea;
228 }
229 
230 /*
231  * Return the largest power of 2, not greater than sizeof(unsigned long),
232  * such that x is a multiple of it.
233  */
234 static nokprobe_inline unsigned long max_align(unsigned long x)
235 {
236 	x |= sizeof(unsigned long);
237 	return x & -x;		/* isolates rightmost bit */
238 }
239 
240 static nokprobe_inline unsigned long byterev_2(unsigned long x)
241 {
242 	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
243 }
244 
245 static nokprobe_inline unsigned long byterev_4(unsigned long x)
246 {
247 	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
248 		((x & 0xff00) << 8) | ((x & 0xff) << 24);
249 }
250 
251 #ifdef __powerpc64__
252 static nokprobe_inline unsigned long byterev_8(unsigned long x)
253 {
254 	return (byterev_4(x) << 32) | byterev_4(x >> 32);
255 }
256 #endif
257 
258 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
259 {
260 	switch (nb) {
261 	case 2:
262 		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
263 		break;
264 	case 4:
265 		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
266 		break;
267 #ifdef __powerpc64__
268 	case 8:
269 		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
270 		break;
271 	case 16: {
272 		unsigned long *up = (unsigned long *)ptr;
273 		unsigned long tmp;
274 		tmp = byterev_8(up[0]);
275 		up[0] = byterev_8(up[1]);
276 		up[1] = tmp;
277 		break;
278 	}
279 #endif
280 	default:
281 		WARN_ON_ONCE(1);
282 	}
283 }
284 
/*
 * Read nb (1, 2, 4 or 8) bytes from the naturally-aligned address ea
 * into *dest, zero-extended.  Returns 0 or the __get_user() error;
 * on failure the faulting address is recorded in regs->dar.
 */
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}
314 
/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 * Returns 0 on success; on a fault, returns the __get_user() error
 * and records the faulting address in regs->dar.
 */
static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* largest aligned chunk permitted by address and count */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
357 
358 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
359 					      unsigned long ea, int nb,
360 					      struct pt_regs *regs)
361 {
362 	union {
363 		unsigned long ul;
364 		u8 b[sizeof(unsigned long)];
365 	} u;
366 	int i;
367 	int err;
368 
369 	u.ul = 0;
370 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
371 	err = copy_mem_in(&u.b[i], ea, nb, regs);
372 	if (!err)
373 		*dest = u.ul;
374 	return err;
375 }
376 
377 /*
378  * Read memory at address ea for nb bytes, return 0 for success
379  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
380  * If nb < sizeof(long), the result is right-justified on BE systems.
381  */
382 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
383 			      struct pt_regs *regs)
384 {
385 	if (!address_ok(regs, ea, nb))
386 		return -EFAULT;
387 	if ((ea & (nb - 1)) == 0)
388 		return read_mem_aligned(dest, ea, nb, regs);
389 	return read_mem_unaligned(dest, ea, nb, regs);
390 }
391 NOKPROBE_SYMBOL(read_mem);
392 
/*
 * Write the low nb (1, 2, 4 or 8) bytes of val to the naturally-aligned
 * address ea.  Returns 0 or the __put_user() error; on failure the
 * faulting address is recorded in regs->dar.
 */
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}
419 
/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 * Returns 0 on success; on a fault, returns the __put_user() error
 * and records the faulting address in regs->dar.
 */
static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* largest aligned chunk permitted by address and count */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
462 
463 static nokprobe_inline int write_mem_unaligned(unsigned long val,
464 					       unsigned long ea, int nb,
465 					       struct pt_regs *regs)
466 {
467 	union {
468 		unsigned long ul;
469 		u8 b[sizeof(unsigned long)];
470 	} u;
471 	int i;
472 
473 	u.ul = val;
474 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
475 	return copy_mem_out(&u.b[i], ea, nb, regs);
476 }
477 
478 /*
479  * Write memory at address ea for nb bytes, return 0 for success
480  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
481  */
482 static int write_mem(unsigned long val, unsigned long ea, int nb,
483 			       struct pt_regs *regs)
484 {
485 	if (!address_ok(regs, ea, nb))
486 		return -EFAULT;
487 	if ((ea & (nb - 1)) == 0)
488 		return write_mem_aligned(val, ea, nb, regs);
489 	return write_mem_unaligned(val, ea, nb, regs);
490 }
491 NOKPROBE_SYMBOL(write_mem);
492 
493 #ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
/*
 * Emulate a floating-point load of nb (4, 8 or 16) bytes from ea into
 * FP register op->reg; a 16-byte load also fills op->reg|1.
 * Returns 0 on success, -EFAULT or the copy error otherwise.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte doubleword independently */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		/* 4-byte datum: convert to double or extend to 64 bits */
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	/* write the live register if the FPU is enabled, else the image */
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);
548 
/*
 * Emulate a floating-point store of nb (4, 8 or 16) bytes from FP
 * register op->reg (and op->reg|1 for 16 bytes) to ea.
 * Returns 0 on success, -EFAULT or the copy error otherwise.
 */
static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	/* read the live register if the FPU is enabled, else the image */
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		/* 4-byte datum: convert to single or truncate */
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		/* second register of a pair (cf. lfdp in do_fp_load) */
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte doubleword independently */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
592 #endif
593 
594 #ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
/*
 * Load size bytes from ea into vector register rn.  The EA is rounded
 * down to a multiple of size and the bytes are placed at that offset
 * within the 16-byte register image.
 */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	/* check the whole 16-byte quadword containing ea */
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	/* write the live register if VMX is enabled, else the image */
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}
623 
/*
 * Store size bytes from vector register rn to ea.  The EA is rounded
 * down to a multiple of size; the bytes come from that offset within
 * the 16-byte register image (mirror of do_vec_load()).
 */
static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	/* check the whole 16-byte quadword containing ea */
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	/* read the live register if VMX is enabled, else the image */
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
648 #endif /* CONFIG_ALTIVEC */
649 
650 #ifdef __powerpc64__
/*
 * Emulate a 16-byte quadword load into the GPR pair reg/reg+1.
 * An aligned EA uses do_lq() (quad.S) so the access is atomic;
 * otherwise two 8-byte reads are used.
 */
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		/* IS_LE/IS_BE select which GPR receives which doubleword */
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}
670 
/*
 * Emulate a 16-byte quadword store from the GPR pair reg/reg+1.
 * An aligned EA uses do_stq() (quad.S) so the access is atomic;
 * otherwise two 8-byte writes are used.
 */
static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	/* IS_LE/IS_BE select which value goes to which doubleword */
	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
693 #endif /* __powerpc64 */
694 
695 #ifdef CONFIG_VSX
/*
 * Expand the size-byte memory image at *mem into the VSX register
 * image *reg according to op->element_size and op->vsx_flags.
 * rev requests byte-reversal within elements (cross-endian load).
 */
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	/* start from a zeroed register image */
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, 16);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		/* place the bytes within the first doubleword lane */
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				/* single -> double precision conversion */
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				/* second doubleword of lxvd2x */
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				/* replicate into the other doubleword */
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			/* replicate the loaded word into remaining lanes */
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
779 
/*
 * Pack the VSX register image *reg into the size-byte memory image at
 * *mem according to op->element_size and op->vsx_flags (mirror of
 * emulate_vsx_load()).  rev requests byte-reversal within elements.
 */
void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			/* double -> single precision conversion */
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			/* second doubleword of stxvd2x */
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);
855 
/*
 * Emulate a VSX load: read the memory image, expand it with
 * emulate_vsx_load(), then deposit the result in VSR op->reg --
 * either the live register or the thread_struct image depending
 * on MSR_FP (VSRs 0-31) / MSR_VEC (VSRs 32-63).
 */
static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	emulate_vsx_load(op, &buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			load_vsrn(reg, &buf);
		} else {
			current->thread.fp_state.fpr[reg][0] = buf.d[0];
			current->thread.fp_state.fpr[reg][1] = buf.d[1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			load_vsrn(reg, &buf);
		else
			current->thread.vr_state.vr[reg - 32] = buf.v;
	}
	preempt_enable();
	return 0;
}
887 
/*
 * Emulate a VSX store: fetch VSR op->reg (live register or
 * thread_struct image, cf. do_vsx_load()), pack it with
 * emulate_vsx_store() and write the memory image out.
 */
static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			store_vsrn(reg, &buf);
		} else {
			buf.d[0] = current->thread.fp_state.fpr[reg][0];
			buf.d[1] = current->thread.fp_state.fpr[reg][1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			store_vsrn(reg, &buf);
		else
			buf.v = current->thread.vr_state.vr[reg - 32];
	}
	preempt_enable();
	emulate_vsx_store(op, &buf, mem, cross_endian);
	return  copy_mem_out(mem, ea, size, regs);
}
919 #endif /* CONFIG_VSX */
920 
/*
 * Emulate dcbz: zero the entire L1 cache block containing ea, one
 * long at a time.  Returns 0 on success, -EFAULT if the block fails
 * the access check, or the __put_user() error (with regs->dar set).
 */
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long i, size;

#ifdef __powerpc64__
	size = ppc64_caches.l1d.block_size;
	/* 32-bit mode truncates effective addresses to 32 bits */
	if (!(regs->msr & MSR_64BIT))
		ea &= 0xffffffffUL;
#else
	size = L1_CACHE_BYTES;
#endif
	/* round down to the start of the cache block */
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;
	for (i = 0; i < size; i += sizeof(long)) {
		err = __put_user(0, (unsigned long __user *) (ea + i));
		if (err) {
			regs->dar = ea;
			return err;
		}
	}
	return 0;
}
NOKPROBE_SYMBOL(emulate_dcbz);
946 
/*
 * Execute the X-form store "op x,0,addr" and read CR back into cr
 * (mfcr); on a fault the fixup sets err to -EFAULT.
 */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Execute the X-form load "op x,0,addr"; on a fault the fixup sets
 * err to -EFAULT and x is left unmodified.
 */
#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

/*
 * Execute the cache-management instruction "op 0,addr"; on a fault
 * the fixup sets err to -EFAULT.
 */
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
983 
984 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
985 				    struct instruction_op *op)
986 {
987 	long val = op->val;
988 
989 	op->type |= SETCC;
990 	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
991 #ifdef __powerpc64__
992 	if (!(regs->msr & MSR_64BIT))
993 		val = (int) val;
994 #endif
995 	if (val < 0)
996 		op->ccval |= 0x80000000;
997 	else if (val > 0)
998 		op->ccval |= 0x40000000;
999 	else
1000 		op->ccval |= 0x20000000;
1001 }
1002 
1003 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1004 {
1005 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1006 		if (val)
1007 			op->xerval |= XER_CA32;
1008 		else
1009 			op->xerval &= ~XER_CA32;
1010 	}
1011 }
1012 
/*
 * Compute val1 + val2 (+1 if carry_in) into GPR rd, recording the
 * resulting CA and CA32 bits in op->xerval.
 */
static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
				     struct instruction_op *op, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	/* in 32-bit mode the carry is computed on the low 32 bits */
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	/* unsigned wraparound: sum below an addend means a carry out */
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	/* CA32: same test restricted to the low 32 bits */
	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
			(carry_in && (unsigned int)val == (unsigned int)val1));
}
1040 
1041 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1042 					  struct instruction_op *op,
1043 					  long v1, long v2, int crfld)
1044 {
1045 	unsigned int crval, shift;
1046 
1047 	op->type = COMPUTE + SETCC;
1048 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1049 	if (v1 < v2)
1050 		crval |= 8;
1051 	else if (v1 > v2)
1052 		crval |= 4;
1053 	else
1054 		crval |= 2;
1055 	shift = (7 - crfld) * 4;
1056 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1057 }
1058 
1059 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1060 					    struct instruction_op *op,
1061 					    unsigned long v1,
1062 					    unsigned long v2, int crfld)
1063 {
1064 	unsigned int crval, shift;
1065 
1066 	op->type = COMPUTE + SETCC;
1067 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1068 	if (v1 < v2)
1069 		crval |= 8;
1070 	else if (v1 > v2)
1071 		crval |= 4;
1072 	else
1073 		crval |= 2;
1074 	shift = (7 - crfld) * 4;
1075 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1076 }
1077 
1078 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1079 				    struct instruction_op *op,
1080 				    unsigned long v1, unsigned long v2)
1081 {
1082 	unsigned long long out_val, mask;
1083 	int i;
1084 
1085 	out_val = 0;
1086 	for (i = 0; i < 8; i++) {
1087 		mask = 0xffUL << (i * 8);
1088 		if ((v1 & mask) == (v2 & mask))
1089 			out_val |= mask;
1090 	}
1091 	op->val = out_val;
1092 }
1093 
1094 /*
1095  * The size parameter is used to adjust the equivalent popcnt instruction.
1096  * popcntb = 8, popcntw = 32, popcntd = 64
1097  */
1098 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1099 				      struct instruction_op *op,
1100 				      unsigned long v1, int size)
1101 {
1102 	unsigned long long out = v1;
1103 
1104 	out -= (out >> 1) & 0x5555555555555555ULL;
1105 	out = (0x3333333333333333ULL & out) +
1106 	      (0x3333333333333333ULL & (out >> 2));
1107 	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1108 
1109 	if (size == 8) {	/* popcntb */
1110 		op->val = out;
1111 		return;
1112 	}
1113 	out += out >> 8;
1114 	out += out >> 16;
1115 	if (size == 32) {	/* popcntw */
1116 		op->val = out & 0x0000003f0000003fULL;
1117 		return;
1118 	}
1119 
1120 	out = (out + (out >> 32)) & 0x7f;
1121 	op->val = out;	/* popcntd */
1122 }
1123 
1124 #ifdef CONFIG_PPC64
1125 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1126 				      struct instruction_op *op,
1127 				      unsigned long v1, unsigned long v2)
1128 {
1129 	unsigned char perm, idx;
1130 	unsigned int i;
1131 
1132 	perm = 0;
1133 	for (i = 0; i < 8; i++) {
1134 		idx = (v1 >> (i * 8)) & 0xff;
1135 		if (idx < 64)
1136 			if (v2 & PPC_BIT(idx))
1137 				perm |= 1 << i;
1138 	}
1139 	op->val = perm;
1140 }
1141 #endif /* CONFIG_PPC64 */
1142 /*
1143  * The size parameter adjusts the equivalent prty instruction.
1144  * prtyw = 32, prtyd = 64
1145  */
1146 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1147 				    struct instruction_op *op,
1148 				    unsigned long v, int size)
1149 {
1150 	unsigned long long res = v ^ (v >> 8);
1151 
1152 	res ^= res >> 16;
1153 	if (size == 32) {		/* prtyw */
1154 		op->val = res & 0x0000000100000001ULL;
1155 		return;
1156 	}
1157 
1158 	res ^= res >> 32;
1159 	op->val = res & 1;	/*prtyd */
1160 }
1161 
1162 static nokprobe_inline int trap_compare(long v1, long v2)
1163 {
1164 	int ret = 0;
1165 
1166 	if (v1 < v2)
1167 		ret |= 0x10;
1168 	else if (v1 > v2)
1169 		ret |= 0x08;
1170 	else
1171 		ret |= 0x04;
1172 	if ((unsigned long)v1 < (unsigned long)v2)
1173 		ret |= 0x02;
1174 	else if ((unsigned long)v1 > (unsigned long)v2)
1175 		ret |= 0x01;
1176 	return ret;
1177 }
1178 
/*
 * Elements of 32-bit rotate and mask instructions.
 */
/* MASK32(mb, me): ones from bit mb through bit me (IBM numbering,
 * bit 0 = MSB), wrapping around when me < mb. */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
/* MASK64_L(mb): ones from bit mb (IBM numbering) through bit 63 */
#define MASK64_L(mb)	(~0UL >> (mb))
/* MASK64_R(me): ones from bit 0 through bit me */
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
/* MASK64(mb, me): ones from mb through me, wrapping when me < mb */
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
/* DATA32(x): replicate the low 32 bits of x into both halves */
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
/* ROTATE(x, n): rotate left by n, avoiding the undefined n == 0 shift */
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
1193 
1194 /*
1195  * Decode an instruction, and return information about it in *op
1196  * without changing *regs.
1197  * Integer arithmetic and logical instructions, branches, and barrier
1198  * instructions can be emulated just using the information in *op.
1199  *
1200  * Return value is 1 if the instruction can be emulated just by
1201  * updating *regs with the information in *op, -1 if we need the
1202  * GPRs but *regs doesn't contain the full register set, or 0
1203  * otherwise.
1204  */
1205 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1206 		  struct ppc_inst instr)
1207 {
1208 #ifdef CONFIG_PPC64
1209 	unsigned int suffixopcode, prefixtype, prefix_r;
1210 #endif
1211 	unsigned int opcode, ra, rb, rc, rd, spr, u;
1212 	unsigned long int imm;
1213 	unsigned long int val, val2;
1214 	unsigned int mb, me, sh;
1215 	unsigned int word, suffix;
1216 	long ival;
1217 
1218 	word = ppc_inst_val(instr);
1219 	suffix = ppc_inst_suffix(instr);
1220 
1221 	op->type = COMPUTE;
1222 
1223 	opcode = ppc_inst_primary_opcode(instr);
1224 	switch (opcode) {
1225 	case 16:	/* bc */
1226 		op->type = BRANCH;
1227 		imm = (signed short)(word & 0xfffc);
1228 		if ((word & 2) == 0)
1229 			imm += regs->nip;
1230 		op->val = truncate_if_32bit(regs->msr, imm);
1231 		if (word & 1)
1232 			op->type |= SETLK;
1233 		if (branch_taken(word, regs, op))
1234 			op->type |= BRTAKEN;
1235 		return 1;
1236 #ifdef CONFIG_PPC64
1237 	case 17:	/* sc */
1238 		if ((word & 0xfe2) == 2)
1239 			op->type = SYSCALL;
1240 		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1241 				(word & 0xfe3) == 1)
1242 			op->type = SYSCALL_VECTORED_0;
1243 		else
1244 			op->type = UNKNOWN;
1245 		return 0;
1246 #endif
1247 	case 18:	/* b */
1248 		op->type = BRANCH | BRTAKEN;
1249 		imm = word & 0x03fffffc;
1250 		if (imm & 0x02000000)
1251 			imm -= 0x04000000;
1252 		if ((word & 2) == 0)
1253 			imm += regs->nip;
1254 		op->val = truncate_if_32bit(regs->msr, imm);
1255 		if (word & 1)
1256 			op->type |= SETLK;
1257 		return 1;
1258 	case 19:
1259 		switch ((word >> 1) & 0x3ff) {
1260 		case 0:		/* mcrf */
1261 			op->type = COMPUTE + SETCC;
1262 			rd = 7 - ((word >> 23) & 0x7);
1263 			ra = 7 - ((word >> 18) & 0x7);
1264 			rd *= 4;
1265 			ra *= 4;
1266 			val = (regs->ccr >> ra) & 0xf;
1267 			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1268 			return 1;
1269 
1270 		case 16:	/* bclr */
1271 		case 528:	/* bcctr */
1272 			op->type = BRANCH;
1273 			imm = (word & 0x400)? regs->ctr: regs->link;
1274 			op->val = truncate_if_32bit(regs->msr, imm);
1275 			if (word & 1)
1276 				op->type |= SETLK;
1277 			if (branch_taken(word, regs, op))
1278 				op->type |= BRTAKEN;
1279 			return 1;
1280 
1281 		case 18:	/* rfid, scary */
1282 			if (regs->msr & MSR_PR)
1283 				goto priv;
1284 			op->type = RFI;
1285 			return 0;
1286 
1287 		case 150:	/* isync */
1288 			op->type = BARRIER | BARRIER_ISYNC;
1289 			return 1;
1290 
1291 		case 33:	/* crnor */
1292 		case 129:	/* crandc */
1293 		case 193:	/* crxor */
1294 		case 225:	/* crnand */
1295 		case 257:	/* crand */
1296 		case 289:	/* creqv */
1297 		case 417:	/* crorc */
1298 		case 449:	/* cror */
1299 			op->type = COMPUTE + SETCC;
1300 			ra = (word >> 16) & 0x1f;
1301 			rb = (word >> 11) & 0x1f;
1302 			rd = (word >> 21) & 0x1f;
1303 			ra = (regs->ccr >> (31 - ra)) & 1;
1304 			rb = (regs->ccr >> (31 - rb)) & 1;
1305 			val = (word >> (6 + ra * 2 + rb)) & 1;
1306 			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1307 				(val << (31 - rd));
1308 			return 1;
1309 		}
1310 		break;
1311 	case 31:
1312 		switch ((word >> 1) & 0x3ff) {
1313 		case 598:	/* sync */
1314 			op->type = BARRIER + BARRIER_SYNC;
1315 #ifdef __powerpc64__
1316 			switch ((word >> 21) & 3) {
1317 			case 1:		/* lwsync */
1318 				op->type = BARRIER + BARRIER_LWSYNC;
1319 				break;
1320 			case 2:		/* ptesync */
1321 				op->type = BARRIER + BARRIER_PTESYNC;
1322 				break;
1323 			}
1324 #endif
1325 			return 1;
1326 
1327 		case 854:	/* eieio */
1328 			op->type = BARRIER + BARRIER_EIEIO;
1329 			return 1;
1330 		}
1331 		break;
1332 	}
1333 
1334 	/* Following cases refer to regs->gpr[], so we need all regs */
1335 	if (!FULL_REGS(regs))
1336 		return -1;
1337 
1338 	rd = (word >> 21) & 0x1f;
1339 	ra = (word >> 16) & 0x1f;
1340 	rb = (word >> 11) & 0x1f;
1341 	rc = (word >> 6) & 0x1f;
1342 
1343 	switch (opcode) {
1344 #ifdef __powerpc64__
1345 	case 1:
1346 		prefix_r = GET_PREFIX_R(word);
1347 		ra = GET_PREFIX_RA(suffix);
1348 		rd = (suffix >> 21) & 0x1f;
1349 		op->reg = rd;
1350 		op->val = regs->gpr[rd];
1351 		suffixopcode = get_op(suffix);
1352 		prefixtype = (word >> 24) & 0x3;
1353 		switch (prefixtype) {
1354 		case 2:
1355 			if (prefix_r && ra)
1356 				return 0;
1357 			switch (suffixopcode) {
1358 			case 14:	/* paddi */
1359 				op->type = COMPUTE | PREFIXED;
1360 				op->val = mlsd_8lsd_ea(word, suffix, regs);
1361 				goto compute_done;
1362 			}
1363 		}
1364 		break;
1365 	case 2:		/* tdi */
1366 		if (rd & trap_compare(regs->gpr[ra], (short) word))
1367 			goto trap;
1368 		return 1;
1369 #endif
1370 	case 3:		/* twi */
1371 		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1372 			goto trap;
1373 		return 1;
1374 
1375 #ifdef __powerpc64__
1376 	case 4:
1377 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1378 			return -1;
1379 
1380 		switch (word & 0x3f) {
1381 		case 48:	/* maddhd */
1382 			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1383 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1384 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1385 			goto compute_done;
1386 
1387 		case 49:	/* maddhdu */
1388 			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1389 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1390 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1391 			goto compute_done;
1392 
1393 		case 51:	/* maddld */
1394 			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1395 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1396 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1397 			goto compute_done;
1398 		}
1399 
1400 		/*
1401 		 * There are other instructions from ISA 3.0 with the same
1402 		 * primary opcode which do not have emulation support yet.
1403 		 */
1404 		return -1;
1405 #endif
1406 
1407 	case 7:		/* mulli */
1408 		op->val = regs->gpr[ra] * (short) word;
1409 		goto compute_done;
1410 
1411 	case 8:		/* subfic */
1412 		imm = (short) word;
1413 		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1414 		return 1;
1415 
1416 	case 10:	/* cmpli */
1417 		imm = (unsigned short) word;
1418 		val = regs->gpr[ra];
1419 #ifdef __powerpc64__
1420 		if ((rd & 1) == 0)
1421 			val = (unsigned int) val;
1422 #endif
1423 		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1424 		return 1;
1425 
1426 	case 11:	/* cmpi */
1427 		imm = (short) word;
1428 		val = regs->gpr[ra];
1429 #ifdef __powerpc64__
1430 		if ((rd & 1) == 0)
1431 			val = (int) val;
1432 #endif
1433 		do_cmp_signed(regs, op, val, imm, rd >> 2);
1434 		return 1;
1435 
1436 	case 12:	/* addic */
1437 		imm = (short) word;
1438 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1439 		return 1;
1440 
1441 	case 13:	/* addic. */
1442 		imm = (short) word;
1443 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1444 		set_cr0(regs, op);
1445 		return 1;
1446 
1447 	case 14:	/* addi */
1448 		imm = (short) word;
1449 		if (ra)
1450 			imm += regs->gpr[ra];
1451 		op->val = imm;
1452 		goto compute_done;
1453 
1454 	case 15:	/* addis */
1455 		imm = ((short) word) << 16;
1456 		if (ra)
1457 			imm += regs->gpr[ra];
1458 		op->val = imm;
1459 		goto compute_done;
1460 
1461 	case 19:
1462 		if (((word >> 1) & 0x1f) == 2) {
1463 			/* addpcis */
1464 			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1465 			imm |= (word >> 15) & 0x3e;	/* d1 field */
1466 			op->val = regs->nip + (imm << 16) + 4;
1467 			goto compute_done;
1468 		}
1469 		op->type = UNKNOWN;
1470 		return 0;
1471 
1472 	case 20:	/* rlwimi */
1473 		mb = (word >> 6) & 0x1f;
1474 		me = (word >> 1) & 0x1f;
1475 		val = DATA32(regs->gpr[rd]);
1476 		imm = MASK32(mb, me);
1477 		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1478 		goto logical_done;
1479 
1480 	case 21:	/* rlwinm */
1481 		mb = (word >> 6) & 0x1f;
1482 		me = (word >> 1) & 0x1f;
1483 		val = DATA32(regs->gpr[rd]);
1484 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1485 		goto logical_done;
1486 
1487 	case 23:	/* rlwnm */
1488 		mb = (word >> 6) & 0x1f;
1489 		me = (word >> 1) & 0x1f;
1490 		rb = regs->gpr[rb] & 0x1f;
1491 		val = DATA32(regs->gpr[rd]);
1492 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1493 		goto logical_done;
1494 
1495 	case 24:	/* ori */
1496 		op->val = regs->gpr[rd] | (unsigned short) word;
1497 		goto logical_done_nocc;
1498 
1499 	case 25:	/* oris */
1500 		imm = (unsigned short) word;
1501 		op->val = regs->gpr[rd] | (imm << 16);
1502 		goto logical_done_nocc;
1503 
1504 	case 26:	/* xori */
1505 		op->val = regs->gpr[rd] ^ (unsigned short) word;
1506 		goto logical_done_nocc;
1507 
1508 	case 27:	/* xoris */
1509 		imm = (unsigned short) word;
1510 		op->val = regs->gpr[rd] ^ (imm << 16);
1511 		goto logical_done_nocc;
1512 
1513 	case 28:	/* andi. */
1514 		op->val = regs->gpr[rd] & (unsigned short) word;
1515 		set_cr0(regs, op);
1516 		goto logical_done_nocc;
1517 
1518 	case 29:	/* andis. */
1519 		imm = (unsigned short) word;
1520 		op->val = regs->gpr[rd] & (imm << 16);
1521 		set_cr0(regs, op);
1522 		goto logical_done_nocc;
1523 
1524 #ifdef __powerpc64__
1525 	case 30:	/* rld* */
1526 		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1527 		val = regs->gpr[rd];
1528 		if ((word & 0x10) == 0) {
1529 			sh = rb | ((word & 2) << 4);
1530 			val = ROTATE(val, sh);
1531 			switch ((word >> 2) & 3) {
1532 			case 0:		/* rldicl */
1533 				val &= MASK64_L(mb);
1534 				break;
1535 			case 1:		/* rldicr */
1536 				val &= MASK64_R(mb);
1537 				break;
1538 			case 2:		/* rldic */
1539 				val &= MASK64(mb, 63 - sh);
1540 				break;
1541 			case 3:		/* rldimi */
1542 				imm = MASK64(mb, 63 - sh);
1543 				val = (regs->gpr[ra] & ~imm) |
1544 					(val & imm);
1545 			}
1546 			op->val = val;
1547 			goto logical_done;
1548 		} else {
1549 			sh = regs->gpr[rb] & 0x3f;
1550 			val = ROTATE(val, sh);
1551 			switch ((word >> 1) & 7) {
1552 			case 0:		/* rldcl */
1553 				op->val = val & MASK64_L(mb);
1554 				goto logical_done;
1555 			case 1:		/* rldcr */
1556 				op->val = val & MASK64_R(mb);
1557 				goto logical_done;
1558 			}
1559 		}
1560 #endif
1561 		op->type = UNKNOWN;	/* illegal instruction */
1562 		return 0;
1563 
1564 	case 31:
1565 		/* isel occupies 32 minor opcodes */
1566 		if (((word >> 1) & 0x1f) == 15) {
1567 			mb = (word >> 6) & 0x1f; /* bc field */
1568 			val = (regs->ccr >> (31 - mb)) & 1;
1569 			val2 = (ra) ? regs->gpr[ra] : 0;
1570 
1571 			op->val = (val) ? val2 : regs->gpr[rb];
1572 			goto compute_done;
1573 		}
1574 
1575 		switch ((word >> 1) & 0x3ff) {
1576 		case 4:		/* tw */
1577 			if (rd == 0x1f ||
1578 			    (rd & trap_compare((int)regs->gpr[ra],
1579 					       (int)regs->gpr[rb])))
1580 				goto trap;
1581 			return 1;
1582 #ifdef __powerpc64__
1583 		case 68:	/* td */
1584 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1585 				goto trap;
1586 			return 1;
1587 #endif
1588 		case 83:	/* mfmsr */
1589 			if (regs->msr & MSR_PR)
1590 				goto priv;
1591 			op->type = MFMSR;
1592 			op->reg = rd;
1593 			return 0;
1594 		case 146:	/* mtmsr */
1595 			if (regs->msr & MSR_PR)
1596 				goto priv;
1597 			op->type = MTMSR;
1598 			op->reg = rd;
1599 			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1600 			return 0;
1601 #ifdef CONFIG_PPC64
1602 		case 178:	/* mtmsrd */
1603 			if (regs->msr & MSR_PR)
1604 				goto priv;
1605 			op->type = MTMSR;
1606 			op->reg = rd;
1607 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1608 			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1609 			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1610 			op->val = imm;
1611 			return 0;
1612 #endif
1613 
1614 		case 19:	/* mfcr */
1615 			imm = 0xffffffffUL;
1616 			if ((word >> 20) & 1) {
1617 				imm = 0xf0000000UL;
1618 				for (sh = 0; sh < 8; ++sh) {
1619 					if (word & (0x80000 >> sh))
1620 						break;
1621 					imm >>= 4;
1622 				}
1623 			}
1624 			op->val = regs->ccr & imm;
1625 			goto compute_done;
1626 
1627 		case 144:	/* mtcrf */
1628 			op->type = COMPUTE + SETCC;
1629 			imm = 0xf0000000UL;
1630 			val = regs->gpr[rd];
1631 			op->ccval = regs->ccr;
1632 			for (sh = 0; sh < 8; ++sh) {
1633 				if (word & (0x80000 >> sh))
1634 					op->ccval = (op->ccval & ~imm) |
1635 						(val & imm);
1636 				imm >>= 4;
1637 			}
1638 			return 1;
1639 
1640 		case 339:	/* mfspr */
1641 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1642 			op->type = MFSPR;
1643 			op->reg = rd;
1644 			op->spr = spr;
1645 			if (spr == SPRN_XER || spr == SPRN_LR ||
1646 			    spr == SPRN_CTR)
1647 				return 1;
1648 			return 0;
1649 
1650 		case 467:	/* mtspr */
1651 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1652 			op->type = MTSPR;
1653 			op->val = regs->gpr[rd];
1654 			op->spr = spr;
1655 			if (spr == SPRN_XER || spr == SPRN_LR ||
1656 			    spr == SPRN_CTR)
1657 				return 1;
1658 			return 0;
1659 
1660 /*
1661  * Compare instructions
1662  */
1663 		case 0:	/* cmp */
1664 			val = regs->gpr[ra];
1665 			val2 = regs->gpr[rb];
1666 #ifdef __powerpc64__
1667 			if ((rd & 1) == 0) {
1668 				/* word (32-bit) compare */
1669 				val = (int) val;
1670 				val2 = (int) val2;
1671 			}
1672 #endif
1673 			do_cmp_signed(regs, op, val, val2, rd >> 2);
1674 			return 1;
1675 
1676 		case 32:	/* cmpl */
1677 			val = regs->gpr[ra];
1678 			val2 = regs->gpr[rb];
1679 #ifdef __powerpc64__
1680 			if ((rd & 1) == 0) {
1681 				/* word (32-bit) compare */
1682 				val = (unsigned int) val;
1683 				val2 = (unsigned int) val2;
1684 			}
1685 #endif
1686 			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1687 			return 1;
1688 
1689 		case 508: /* cmpb */
1690 			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1691 			goto logical_done_nocc;
1692 
1693 /*
1694  * Arithmetic instructions
1695  */
1696 		case 8:	/* subfc */
1697 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1698 				       regs->gpr[rb], 1);
1699 			goto arith_done;
1700 #ifdef __powerpc64__
1701 		case 9:	/* mulhdu */
1702 			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1703 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1704 			goto arith_done;
1705 #endif
1706 		case 10:	/* addc */
1707 			add_with_carry(regs, op, rd, regs->gpr[ra],
1708 				       regs->gpr[rb], 0);
1709 			goto arith_done;
1710 
1711 		case 11:	/* mulhwu */
1712 			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1713 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1714 			goto arith_done;
1715 
1716 		case 40:	/* subf */
1717 			op->val = regs->gpr[rb] - regs->gpr[ra];
1718 			goto arith_done;
1719 #ifdef __powerpc64__
1720 		case 73:	/* mulhd */
1721 			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1722 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1723 			goto arith_done;
1724 #endif
1725 		case 75:	/* mulhw */
1726 			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1727 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1728 			goto arith_done;
1729 
1730 		case 104:	/* neg */
1731 			op->val = -regs->gpr[ra];
1732 			goto arith_done;
1733 
1734 		case 136:	/* subfe */
1735 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1736 				       regs->gpr[rb], regs->xer & XER_CA);
1737 			goto arith_done;
1738 
1739 		case 138:	/* adde */
1740 			add_with_carry(regs, op, rd, regs->gpr[ra],
1741 				       regs->gpr[rb], regs->xer & XER_CA);
1742 			goto arith_done;
1743 
1744 		case 200:	/* subfze */
1745 			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1746 				       regs->xer & XER_CA);
1747 			goto arith_done;
1748 
1749 		case 202:	/* addze */
1750 			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1751 				       regs->xer & XER_CA);
1752 			goto arith_done;
1753 
1754 		case 232:	/* subfme */
1755 			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1756 				       regs->xer & XER_CA);
1757 			goto arith_done;
1758 #ifdef __powerpc64__
1759 		case 233:	/* mulld */
1760 			op->val = regs->gpr[ra] * regs->gpr[rb];
1761 			goto arith_done;
1762 #endif
1763 		case 234:	/* addme */
1764 			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1765 				       regs->xer & XER_CA);
1766 			goto arith_done;
1767 
1768 		case 235:	/* mullw */
1769 			op->val = (long)(int) regs->gpr[ra] *
1770 				(int) regs->gpr[rb];
1771 
1772 			goto arith_done;
1773 #ifdef __powerpc64__
1774 		case 265:	/* modud */
1775 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1776 				return -1;
1777 			op->val = regs->gpr[ra] % regs->gpr[rb];
1778 			goto compute_done;
1779 #endif
1780 		case 266:	/* add */
1781 			op->val = regs->gpr[ra] + regs->gpr[rb];
1782 			goto arith_done;
1783 
1784 		case 267:	/* moduw */
1785 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1786 				return -1;
1787 			op->val = (unsigned int) regs->gpr[ra] %
1788 				(unsigned int) regs->gpr[rb];
1789 			goto compute_done;
1790 #ifdef __powerpc64__
1791 		case 457:	/* divdu */
1792 			op->val = regs->gpr[ra] / regs->gpr[rb];
1793 			goto arith_done;
1794 #endif
1795 		case 459:	/* divwu */
1796 			op->val = (unsigned int) regs->gpr[ra] /
1797 				(unsigned int) regs->gpr[rb];
1798 			goto arith_done;
1799 #ifdef __powerpc64__
1800 		case 489:	/* divd */
1801 			op->val = (long int) regs->gpr[ra] /
1802 				(long int) regs->gpr[rb];
1803 			goto arith_done;
1804 #endif
1805 		case 491:	/* divw */
1806 			op->val = (int) regs->gpr[ra] /
1807 				(int) regs->gpr[rb];
1808 			goto arith_done;
1809 #ifdef __powerpc64__
1810 		case 425:	/* divde[.] */
1811 			asm volatile(PPC_DIVDE(%0, %1, %2) :
1812 				"=r" (op->val) : "r" (regs->gpr[ra]),
1813 				"r" (regs->gpr[rb]));
1814 			goto arith_done;
1815 		case 393:	/* divdeu[.] */
1816 			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1817 				"=r" (op->val) : "r" (regs->gpr[ra]),
1818 				"r" (regs->gpr[rb]));
1819 			goto arith_done;
1820 #endif
1821 		case 755:	/* darn */
1822 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1823 				return -1;
1824 			switch (ra & 0x3) {
1825 			case 0:
1826 				/* 32-bit conditioned */
1827 				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
1828 				goto compute_done;
1829 
1830 			case 1:
1831 				/* 64-bit conditioned */
1832 				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
1833 				goto compute_done;
1834 
1835 			case 2:
1836 				/* 64-bit raw */
1837 				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
1838 				goto compute_done;
1839 			}
1840 
1841 			return -1;
1842 #ifdef __powerpc64__
1843 		case 777:	/* modsd */
1844 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1845 				return -1;
1846 			op->val = (long int) regs->gpr[ra] %
1847 				(long int) regs->gpr[rb];
1848 			goto compute_done;
1849 #endif
1850 		case 779:	/* modsw */
1851 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1852 				return -1;
1853 			op->val = (int) regs->gpr[ra] %
1854 				(int) regs->gpr[rb];
1855 			goto compute_done;
1856 
1857 
1858 /*
1859  * Logical instructions
1860  */
1861 		case 26:	/* cntlzw */
1862 			val = (unsigned int) regs->gpr[rd];
1863 			op->val = ( val ? __builtin_clz(val) : 32 );
1864 			goto logical_done;
1865 #ifdef __powerpc64__
1866 		case 58:	/* cntlzd */
1867 			val = regs->gpr[rd];
1868 			op->val = ( val ? __builtin_clzl(val) : 64 );
1869 			goto logical_done;
1870 #endif
1871 		case 28:	/* and */
1872 			op->val = regs->gpr[rd] & regs->gpr[rb];
1873 			goto logical_done;
1874 
1875 		case 60:	/* andc */
1876 			op->val = regs->gpr[rd] & ~regs->gpr[rb];
1877 			goto logical_done;
1878 
1879 		case 122:	/* popcntb */
1880 			do_popcnt(regs, op, regs->gpr[rd], 8);
1881 			goto logical_done_nocc;
1882 
1883 		case 124:	/* nor */
1884 			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
1885 			goto logical_done;
1886 
1887 		case 154:	/* prtyw */
1888 			do_prty(regs, op, regs->gpr[rd], 32);
1889 			goto logical_done_nocc;
1890 
1891 		case 186:	/* prtyd */
1892 			do_prty(regs, op, regs->gpr[rd], 64);
1893 			goto logical_done_nocc;
1894 #ifdef CONFIG_PPC64
1895 		case 252:	/* bpermd */
1896 			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
1897 			goto logical_done_nocc;
1898 #endif
1899 		case 284:	/* xor */
1900 			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1901 			goto logical_done;
1902 
1903 		case 316:	/* xor */
1904 			op->val = regs->gpr[rd] ^ regs->gpr[rb];
1905 			goto logical_done;
1906 
1907 		case 378:	/* popcntw */
1908 			do_popcnt(regs, op, regs->gpr[rd], 32);
1909 			goto logical_done_nocc;
1910 
1911 		case 412:	/* orc */
1912 			op->val = regs->gpr[rd] | ~regs->gpr[rb];
1913 			goto logical_done;
1914 
1915 		case 444:	/* or */
1916 			op->val = regs->gpr[rd] | regs->gpr[rb];
1917 			goto logical_done;
1918 
1919 		case 476:	/* nand */
1920 			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
1921 			goto logical_done;
1922 #ifdef CONFIG_PPC64
1923 		case 506:	/* popcntd */
1924 			do_popcnt(regs, op, regs->gpr[rd], 64);
1925 			goto logical_done_nocc;
1926 #endif
1927 		case 538:	/* cnttzw */
1928 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1929 				return -1;
1930 			val = (unsigned int) regs->gpr[rd];
1931 			op->val = (val ? __builtin_ctz(val) : 32);
1932 			goto logical_done;
1933 #ifdef __powerpc64__
1934 		case 570:	/* cnttzd */
1935 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1936 				return -1;
1937 			val = regs->gpr[rd];
1938 			op->val = (val ? __builtin_ctzl(val) : 64);
1939 			goto logical_done;
1940 #endif
1941 		case 922:	/* extsh */
1942 			op->val = (signed short) regs->gpr[rd];
1943 			goto logical_done;
1944 
1945 		case 954:	/* extsb */
1946 			op->val = (signed char) regs->gpr[rd];
1947 			goto logical_done;
1948 #ifdef __powerpc64__
1949 		case 986:	/* extsw */
1950 			op->val = (signed int) regs->gpr[rd];
1951 			goto logical_done;
1952 #endif
1953 
1954 /*
1955  * Shift instructions
1956  */
1957 		case 24:	/* slw */
1958 			sh = regs->gpr[rb] & 0x3f;
1959 			if (sh < 32)
1960 				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
1961 			else
1962 				op->val = 0;
1963 			goto logical_done;
1964 
1965 		case 536:	/* srw */
1966 			sh = regs->gpr[rb] & 0x3f;
1967 			if (sh < 32)
1968 				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1969 			else
1970 				op->val = 0;
1971 			goto logical_done;
1972 
1973 		case 792:	/* sraw */
1974 			op->type = COMPUTE + SETREG + SETXER;
1975 			sh = regs->gpr[rb] & 0x3f;
1976 			ival = (signed int) regs->gpr[rd];
1977 			op->val = ival >> (sh < 32 ? sh : 31);
1978 			op->xerval = regs->xer;
1979 			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1980 				op->xerval |= XER_CA;
1981 			else
1982 				op->xerval &= ~XER_CA;
1983 			set_ca32(op, op->xerval & XER_CA);
1984 			goto logical_done;
1985 
1986 		case 824:	/* srawi */
1987 			op->type = COMPUTE + SETREG + SETXER;
1988 			sh = rb;
1989 			ival = (signed int) regs->gpr[rd];
1990 			op->val = ival >> sh;
1991 			op->xerval = regs->xer;
1992 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1993 				op->xerval |= XER_CA;
1994 			else
1995 				op->xerval &= ~XER_CA;
1996 			set_ca32(op, op->xerval & XER_CA);
1997 			goto logical_done;
1998 
1999 #ifdef __powerpc64__
2000 		case 27:	/* sld */
2001 			sh = regs->gpr[rb] & 0x7f;
2002 			if (sh < 64)
2003 				op->val = regs->gpr[rd] << sh;
2004 			else
2005 				op->val = 0;
2006 			goto logical_done;
2007 
2008 		case 539:	/* srd */
2009 			sh = regs->gpr[rb] & 0x7f;
2010 			if (sh < 64)
2011 				op->val = regs->gpr[rd] >> sh;
2012 			else
2013 				op->val = 0;
2014 			goto logical_done;
2015 
2016 		case 794:	/* srad */
2017 			op->type = COMPUTE + SETREG + SETXER;
2018 			sh = regs->gpr[rb] & 0x7f;
2019 			ival = (signed long int) regs->gpr[rd];
2020 			op->val = ival >> (sh < 64 ? sh : 63);
2021 			op->xerval = regs->xer;
2022 			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2023 				op->xerval |= XER_CA;
2024 			else
2025 				op->xerval &= ~XER_CA;
2026 			set_ca32(op, op->xerval & XER_CA);
2027 			goto logical_done;
2028 
2029 		case 826:	/* sradi with sh_5 = 0 */
2030 		case 827:	/* sradi with sh_5 = 1 */
2031 			op->type = COMPUTE + SETREG + SETXER;
2032 			sh = rb | ((word & 2) << 4);
2033 			ival = (signed long int) regs->gpr[rd];
2034 			op->val = ival >> sh;
2035 			op->xerval = regs->xer;
2036 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2037 				op->xerval |= XER_CA;
2038 			else
2039 				op->xerval &= ~XER_CA;
2040 			set_ca32(op, op->xerval & XER_CA);
2041 			goto logical_done;
2042 
2043 		case 890:	/* extswsli with sh_5 = 0 */
2044 		case 891:	/* extswsli with sh_5 = 1 */
2045 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2046 				return -1;
2047 			op->type = COMPUTE + SETREG;
2048 			sh = rb | ((word & 2) << 4);
2049 			val = (signed int) regs->gpr[rd];
2050 			if (sh)
2051 				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2052 			else
2053 				op->val = val;
2054 			goto logical_done;
2055 
2056 #endif /* __powerpc64__ */
2057 
2058 /*
2059  * Cache instructions
2060  */
2061 		case 54:	/* dcbst */
2062 			op->type = MKOP(CACHEOP, DCBST, 0);
2063 			op->ea = xform_ea(word, regs);
2064 			return 0;
2065 
2066 		case 86:	/* dcbf */
2067 			op->type = MKOP(CACHEOP, DCBF, 0);
2068 			op->ea = xform_ea(word, regs);
2069 			return 0;
2070 
2071 		case 246:	/* dcbtst */
2072 			op->type = MKOP(CACHEOP, DCBTST, 0);
2073 			op->ea = xform_ea(word, regs);
2074 			op->reg = rd;
2075 			return 0;
2076 
2077 		case 278:	/* dcbt */
2078 			op->type = MKOP(CACHEOP, DCBTST, 0);
2079 			op->ea = xform_ea(word, regs);
2080 			op->reg = rd;
2081 			return 0;
2082 
2083 		case 982:	/* icbi */
2084 			op->type = MKOP(CACHEOP, ICBI, 0);
2085 			op->ea = xform_ea(word, regs);
2086 			return 0;
2087 
2088 		case 1014:	/* dcbz */
2089 			op->type = MKOP(CACHEOP, DCBZ, 0);
2090 			op->ea = xform_ea(word, regs);
2091 			return 0;
2092 		}
2093 		break;
2094 	}
2095 
2096 /*
2097  * Loads and stores.
2098  */
2099 	op->type = UNKNOWN;
2100 	op->update_reg = ra;
2101 	op->reg = rd;
2102 	op->val = regs->gpr[rd];
2103 	u = (word >> 20) & UPDATE;
2104 	op->vsx_flags = 0;
2105 
2106 	switch (opcode) {
2107 	case 31:
2108 		u = word & UPDATE;
2109 		op->ea = xform_ea(word, regs);
2110 		switch ((word >> 1) & 0x3ff) {
2111 		case 20:	/* lwarx */
2112 			op->type = MKOP(LARX, 0, 4);
2113 			break;
2114 
2115 		case 150:	/* stwcx. */
2116 			op->type = MKOP(STCX, 0, 4);
2117 			break;
2118 
2119 #ifdef __powerpc64__
2120 		case 84:	/* ldarx */
2121 			op->type = MKOP(LARX, 0, 8);
2122 			break;
2123 
2124 		case 214:	/* stdcx. */
2125 			op->type = MKOP(STCX, 0, 8);
2126 			break;
2127 
2128 		case 52:	/* lbarx */
2129 			op->type = MKOP(LARX, 0, 1);
2130 			break;
2131 
2132 		case 694:	/* stbcx. */
2133 			op->type = MKOP(STCX, 0, 1);
2134 			break;
2135 
2136 		case 116:	/* lharx */
2137 			op->type = MKOP(LARX, 0, 2);
2138 			break;
2139 
2140 		case 726:	/* sthcx. */
2141 			op->type = MKOP(STCX, 0, 2);
2142 			break;
2143 
2144 		case 276:	/* lqarx */
2145 			if (!((rd & 1) || rd == ra || rd == rb))
2146 				op->type = MKOP(LARX, 0, 16);
2147 			break;
2148 
2149 		case 182:	/* stqcx. */
2150 			if (!(rd & 1))
2151 				op->type = MKOP(STCX, 0, 16);
2152 			break;
2153 #endif
2154 
2155 		case 23:	/* lwzx */
2156 		case 55:	/* lwzux */
2157 			op->type = MKOP(LOAD, u, 4);
2158 			break;
2159 
2160 		case 87:	/* lbzx */
2161 		case 119:	/* lbzux */
2162 			op->type = MKOP(LOAD, u, 1);
2163 			break;
2164 
2165 #ifdef CONFIG_ALTIVEC
2166 		/*
2167 		 * Note: for the load/store vector element instructions,
2168 		 * bits of the EA say which field of the VMX register to use.
2169 		 */
2170 		case 7:		/* lvebx */
2171 			op->type = MKOP(LOAD_VMX, 0, 1);
2172 			op->element_size = 1;
2173 			break;
2174 
2175 		case 39:	/* lvehx */
2176 			op->type = MKOP(LOAD_VMX, 0, 2);
2177 			op->element_size = 2;
2178 			break;
2179 
2180 		case 71:	/* lvewx */
2181 			op->type = MKOP(LOAD_VMX, 0, 4);
2182 			op->element_size = 4;
2183 			break;
2184 
2185 		case 103:	/* lvx */
2186 		case 359:	/* lvxl */
2187 			op->type = MKOP(LOAD_VMX, 0, 16);
2188 			op->element_size = 16;
2189 			break;
2190 
2191 		case 135:	/* stvebx */
2192 			op->type = MKOP(STORE_VMX, 0, 1);
2193 			op->element_size = 1;
2194 			break;
2195 
2196 		case 167:	/* stvehx */
2197 			op->type = MKOP(STORE_VMX, 0, 2);
2198 			op->element_size = 2;
2199 			break;
2200 
2201 		case 199:	/* stvewx */
2202 			op->type = MKOP(STORE_VMX, 0, 4);
2203 			op->element_size = 4;
2204 			break;
2205 
2206 		case 231:	/* stvx */
2207 		case 487:	/* stvxl */
2208 			op->type = MKOP(STORE_VMX, 0, 16);
2209 			break;
2210 #endif /* CONFIG_ALTIVEC */
2211 
2212 #ifdef __powerpc64__
2213 		case 21:	/* ldx */
2214 		case 53:	/* ldux */
2215 			op->type = MKOP(LOAD, u, 8);
2216 			break;
2217 
2218 		case 149:	/* stdx */
2219 		case 181:	/* stdux */
2220 			op->type = MKOP(STORE, u, 8);
2221 			break;
2222 #endif
2223 
2224 		case 151:	/* stwx */
2225 		case 183:	/* stwux */
2226 			op->type = MKOP(STORE, u, 4);
2227 			break;
2228 
2229 		case 215:	/* stbx */
2230 		case 247:	/* stbux */
2231 			op->type = MKOP(STORE, u, 1);
2232 			break;
2233 
2234 		case 279:	/* lhzx */
2235 		case 311:	/* lhzux */
2236 			op->type = MKOP(LOAD, u, 2);
2237 			break;
2238 
2239 #ifdef __powerpc64__
2240 		case 341:	/* lwax */
2241 		case 373:	/* lwaux */
2242 			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2243 			break;
2244 #endif
2245 
2246 		case 343:	/* lhax */
2247 		case 375:	/* lhaux */
2248 			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2249 			break;
2250 
2251 		case 407:	/* sthx */
2252 		case 439:	/* sthux */
2253 			op->type = MKOP(STORE, u, 2);
2254 			break;
2255 
2256 #ifdef __powerpc64__
2257 		case 532:	/* ldbrx */
2258 			op->type = MKOP(LOAD, BYTEREV, 8);
2259 			break;
2260 
2261 #endif
2262 		case 533:	/* lswx */
2263 			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2264 			break;
2265 
2266 		case 534:	/* lwbrx */
2267 			op->type = MKOP(LOAD, BYTEREV, 4);
2268 			break;
2269 
2270 		case 597:	/* lswi */
2271 			if (rb == 0)
2272 				rb = 32;	/* # bytes to load */
2273 			op->type = MKOP(LOAD_MULTI, 0, rb);
2274 			op->ea = ra ? regs->gpr[ra] : 0;
2275 			break;
2276 
2277 #ifdef CONFIG_PPC_FPU
2278 		case 535:	/* lfsx */
2279 		case 567:	/* lfsux */
2280 			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2281 			break;
2282 
2283 		case 599:	/* lfdx */
2284 		case 631:	/* lfdux */
2285 			op->type = MKOP(LOAD_FP, u, 8);
2286 			break;
2287 
2288 		case 663:	/* stfsx */
2289 		case 695:	/* stfsux */
2290 			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2291 			break;
2292 
2293 		case 727:	/* stfdx */
2294 		case 759:	/* stfdux */
2295 			op->type = MKOP(STORE_FP, u, 8);
2296 			break;
2297 
2298 #ifdef __powerpc64__
2299 		case 791:	/* lfdpx */
2300 			op->type = MKOP(LOAD_FP, 0, 16);
2301 			break;
2302 
2303 		case 855:	/* lfiwax */
2304 			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2305 			break;
2306 
2307 		case 887:	/* lfiwzx */
2308 			op->type = MKOP(LOAD_FP, 0, 4);
2309 			break;
2310 
2311 		case 919:	/* stfdpx */
2312 			op->type = MKOP(STORE_FP, 0, 16);
2313 			break;
2314 
2315 		case 983:	/* stfiwx */
2316 			op->type = MKOP(STORE_FP, 0, 4);
2317 			break;
2318 #endif /* __powerpc64 */
2319 #endif /* CONFIG_PPC_FPU */
2320 
2321 #ifdef __powerpc64__
2322 		case 660:	/* stdbrx */
2323 			op->type = MKOP(STORE, BYTEREV, 8);
2324 			op->val = byterev_8(regs->gpr[rd]);
2325 			break;
2326 
2327 #endif
2328 		case 661:	/* stswx */
2329 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2330 			break;
2331 
2332 		case 662:	/* stwbrx */
2333 			op->type = MKOP(STORE, BYTEREV, 4);
2334 			op->val = byterev_4(regs->gpr[rd]);
2335 			break;
2336 
2337 		case 725:	/* stswi */
2338 			if (rb == 0)
2339 				rb = 32;	/* # bytes to store */
2340 			op->type = MKOP(STORE_MULTI, 0, rb);
2341 			op->ea = ra ? regs->gpr[ra] : 0;
2342 			break;
2343 
2344 		case 790:	/* lhbrx */
2345 			op->type = MKOP(LOAD, BYTEREV, 2);
2346 			break;
2347 
2348 		case 918:	/* sthbrx */
2349 			op->type = MKOP(STORE, BYTEREV, 2);
2350 			op->val = byterev_2(regs->gpr[rd]);
2351 			break;
2352 
2353 #ifdef CONFIG_VSX
2354 		case 12:	/* lxsiwzx */
2355 			op->reg = rd | ((word & 1) << 5);
2356 			op->type = MKOP(LOAD_VSX, 0, 4);
2357 			op->element_size = 8;
2358 			break;
2359 
2360 		case 76:	/* lxsiwax */
2361 			op->reg = rd | ((word & 1) << 5);
2362 			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2363 			op->element_size = 8;
2364 			break;
2365 
2366 		case 140:	/* stxsiwx */
2367 			op->reg = rd | ((word & 1) << 5);
2368 			op->type = MKOP(STORE_VSX, 0, 4);
2369 			op->element_size = 8;
2370 			break;
2371 
2372 		case 268:	/* lxvx */
2373 			op->reg = rd | ((word & 1) << 5);
2374 			op->type = MKOP(LOAD_VSX, 0, 16);
2375 			op->element_size = 16;
2376 			op->vsx_flags = VSX_CHECK_VEC;
2377 			break;
2378 
2379 		case 269:	/* lxvl */
2380 		case 301: {	/* lxvll */
2381 			int nb;
2382 			op->reg = rd | ((word & 1) << 5);
2383 			op->ea = ra ? regs->gpr[ra] : 0;
2384 			nb = regs->gpr[rb] & 0xff;
2385 			if (nb > 16)
2386 				nb = 16;
2387 			op->type = MKOP(LOAD_VSX, 0, nb);
2388 			op->element_size = 16;
2389 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2390 				VSX_CHECK_VEC;
2391 			break;
2392 		}
2393 		case 332:	/* lxvdsx */
2394 			op->reg = rd | ((word & 1) << 5);
2395 			op->type = MKOP(LOAD_VSX, 0, 8);
2396 			op->element_size = 8;
2397 			op->vsx_flags = VSX_SPLAT;
2398 			break;
2399 
2400 		case 364:	/* lxvwsx */
2401 			op->reg = rd | ((word & 1) << 5);
2402 			op->type = MKOP(LOAD_VSX, 0, 4);
2403 			op->element_size = 4;
2404 			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2405 			break;
2406 
2407 		case 396:	/* stxvx */
2408 			op->reg = rd | ((word & 1) << 5);
2409 			op->type = MKOP(STORE_VSX, 0, 16);
2410 			op->element_size = 16;
2411 			op->vsx_flags = VSX_CHECK_VEC;
2412 			break;
2413 
2414 		case 397:	/* stxvl */
2415 		case 429: {	/* stxvll */
2416 			int nb;
2417 			op->reg = rd | ((word & 1) << 5);
2418 			op->ea = ra ? regs->gpr[ra] : 0;
2419 			nb = regs->gpr[rb] & 0xff;
2420 			if (nb > 16)
2421 				nb = 16;
2422 			op->type = MKOP(STORE_VSX, 0, nb);
2423 			op->element_size = 16;
2424 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2425 				VSX_CHECK_VEC;
2426 			break;
2427 		}
2428 		case 524:	/* lxsspx */
2429 			op->reg = rd | ((word & 1) << 5);
2430 			op->type = MKOP(LOAD_VSX, 0, 4);
2431 			op->element_size = 8;
2432 			op->vsx_flags = VSX_FPCONV;
2433 			break;
2434 
2435 		case 588:	/* lxsdx */
2436 			op->reg = rd | ((word & 1) << 5);
2437 			op->type = MKOP(LOAD_VSX, 0, 8);
2438 			op->element_size = 8;
2439 			break;
2440 
2441 		case 652:	/* stxsspx */
2442 			op->reg = rd | ((word & 1) << 5);
2443 			op->type = MKOP(STORE_VSX, 0, 4);
2444 			op->element_size = 8;
2445 			op->vsx_flags = VSX_FPCONV;
2446 			break;
2447 
2448 		case 716:	/* stxsdx */
2449 			op->reg = rd | ((word & 1) << 5);
2450 			op->type = MKOP(STORE_VSX, 0, 8);
2451 			op->element_size = 8;
2452 			break;
2453 
2454 		case 780:	/* lxvw4x */
2455 			op->reg = rd | ((word & 1) << 5);
2456 			op->type = MKOP(LOAD_VSX, 0, 16);
2457 			op->element_size = 4;
2458 			break;
2459 
2460 		case 781:	/* lxsibzx */
2461 			op->reg = rd | ((word & 1) << 5);
2462 			op->type = MKOP(LOAD_VSX, 0, 1);
2463 			op->element_size = 8;
2464 			op->vsx_flags = VSX_CHECK_VEC;
2465 			break;
2466 
2467 		case 812:	/* lxvh8x */
2468 			op->reg = rd | ((word & 1) << 5);
2469 			op->type = MKOP(LOAD_VSX, 0, 16);
2470 			op->element_size = 2;
2471 			op->vsx_flags = VSX_CHECK_VEC;
2472 			break;
2473 
2474 		case 813:	/* lxsihzx */
2475 			op->reg = rd | ((word & 1) << 5);
2476 			op->type = MKOP(LOAD_VSX, 0, 2);
2477 			op->element_size = 8;
2478 			op->vsx_flags = VSX_CHECK_VEC;
2479 			break;
2480 
2481 		case 844:	/* lxvd2x */
2482 			op->reg = rd | ((word & 1) << 5);
2483 			op->type = MKOP(LOAD_VSX, 0, 16);
2484 			op->element_size = 8;
2485 			break;
2486 
2487 		case 876:	/* lxvb16x */
2488 			op->reg = rd | ((word & 1) << 5);
2489 			op->type = MKOP(LOAD_VSX, 0, 16);
2490 			op->element_size = 1;
2491 			op->vsx_flags = VSX_CHECK_VEC;
2492 			break;
2493 
2494 		case 908:	/* stxvw4x */
2495 			op->reg = rd | ((word & 1) << 5);
2496 			op->type = MKOP(STORE_VSX, 0, 16);
2497 			op->element_size = 4;
2498 			break;
2499 
2500 		case 909:	/* stxsibx */
2501 			op->reg = rd | ((word & 1) << 5);
2502 			op->type = MKOP(STORE_VSX, 0, 1);
2503 			op->element_size = 8;
2504 			op->vsx_flags = VSX_CHECK_VEC;
2505 			break;
2506 
2507 		case 940:	/* stxvh8x */
2508 			op->reg = rd | ((word & 1) << 5);
2509 			op->type = MKOP(STORE_VSX, 0, 16);
2510 			op->element_size = 2;
2511 			op->vsx_flags = VSX_CHECK_VEC;
2512 			break;
2513 
2514 		case 941:	/* stxsihx */
2515 			op->reg = rd | ((word & 1) << 5);
2516 			op->type = MKOP(STORE_VSX, 0, 2);
2517 			op->element_size = 8;
2518 			op->vsx_flags = VSX_CHECK_VEC;
2519 			break;
2520 
2521 		case 972:	/* stxvd2x */
2522 			op->reg = rd | ((word & 1) << 5);
2523 			op->type = MKOP(STORE_VSX, 0, 16);
2524 			op->element_size = 8;
2525 			break;
2526 
2527 		case 1004:	/* stxvb16x */
2528 			op->reg = rd | ((word & 1) << 5);
2529 			op->type = MKOP(STORE_VSX, 0, 16);
2530 			op->element_size = 1;
2531 			op->vsx_flags = VSX_CHECK_VEC;
2532 			break;
2533 
2534 #endif /* CONFIG_VSX */
2535 		}
2536 		break;
2537 
2538 	case 32:	/* lwz */
2539 	case 33:	/* lwzu */
2540 		op->type = MKOP(LOAD, u, 4);
2541 		op->ea = dform_ea(word, regs);
2542 		break;
2543 
2544 	case 34:	/* lbz */
2545 	case 35:	/* lbzu */
2546 		op->type = MKOP(LOAD, u, 1);
2547 		op->ea = dform_ea(word, regs);
2548 		break;
2549 
2550 	case 36:	/* stw */
2551 	case 37:	/* stwu */
2552 		op->type = MKOP(STORE, u, 4);
2553 		op->ea = dform_ea(word, regs);
2554 		break;
2555 
2556 	case 38:	/* stb */
2557 	case 39:	/* stbu */
2558 		op->type = MKOP(STORE, u, 1);
2559 		op->ea = dform_ea(word, regs);
2560 		break;
2561 
2562 	case 40:	/* lhz */
2563 	case 41:	/* lhzu */
2564 		op->type = MKOP(LOAD, u, 2);
2565 		op->ea = dform_ea(word, regs);
2566 		break;
2567 
2568 	case 42:	/* lha */
2569 	case 43:	/* lhau */
2570 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2571 		op->ea = dform_ea(word, regs);
2572 		break;
2573 
2574 	case 44:	/* sth */
2575 	case 45:	/* sthu */
2576 		op->type = MKOP(STORE, u, 2);
2577 		op->ea = dform_ea(word, regs);
2578 		break;
2579 
2580 	case 46:	/* lmw */
2581 		if (ra >= rd)
2582 			break;		/* invalid form, ra in range to load */
2583 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2584 		op->ea = dform_ea(word, regs);
2585 		break;
2586 
2587 	case 47:	/* stmw */
2588 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2589 		op->ea = dform_ea(word, regs);
2590 		break;
2591 
2592 #ifdef CONFIG_PPC_FPU
2593 	case 48:	/* lfs */
2594 	case 49:	/* lfsu */
2595 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2596 		op->ea = dform_ea(word, regs);
2597 		break;
2598 
2599 	case 50:	/* lfd */
2600 	case 51:	/* lfdu */
2601 		op->type = MKOP(LOAD_FP, u, 8);
2602 		op->ea = dform_ea(word, regs);
2603 		break;
2604 
2605 	case 52:	/* stfs */
2606 	case 53:	/* stfsu */
2607 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2608 		op->ea = dform_ea(word, regs);
2609 		break;
2610 
2611 	case 54:	/* stfd */
2612 	case 55:	/* stfdu */
2613 		op->type = MKOP(STORE_FP, u, 8);
2614 		op->ea = dform_ea(word, regs);
2615 		break;
2616 #endif
2617 
2618 #ifdef __powerpc64__
2619 	case 56:	/* lq */
2620 		if (!((rd & 1) || (rd == ra)))
2621 			op->type = MKOP(LOAD, 0, 16);
2622 		op->ea = dqform_ea(word, regs);
2623 		break;
2624 #endif
2625 
2626 #ifdef CONFIG_VSX
2627 	case 57:	/* lfdp, lxsd, lxssp */
2628 		op->ea = dsform_ea(word, regs);
2629 		switch (word & 3) {
2630 		case 0:		/* lfdp */
2631 			if (rd & 1)
2632 				break;		/* reg must be even */
2633 			op->type = MKOP(LOAD_FP, 0, 16);
2634 			break;
2635 		case 2:		/* lxsd */
2636 			op->reg = rd + 32;
2637 			op->type = MKOP(LOAD_VSX, 0, 8);
2638 			op->element_size = 8;
2639 			op->vsx_flags = VSX_CHECK_VEC;
2640 			break;
2641 		case 3:		/* lxssp */
2642 			op->reg = rd + 32;
2643 			op->type = MKOP(LOAD_VSX, 0, 4);
2644 			op->element_size = 8;
2645 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2646 			break;
2647 		}
2648 		break;
2649 #endif /* CONFIG_VSX */
2650 
2651 #ifdef __powerpc64__
2652 	case 58:	/* ld[u], lwa */
2653 		op->ea = dsform_ea(word, regs);
2654 		switch (word & 3) {
2655 		case 0:		/* ld */
2656 			op->type = MKOP(LOAD, 0, 8);
2657 			break;
2658 		case 1:		/* ldu */
2659 			op->type = MKOP(LOAD, UPDATE, 8);
2660 			break;
2661 		case 2:		/* lwa */
2662 			op->type = MKOP(LOAD, SIGNEXT, 4);
2663 			break;
2664 		}
2665 		break;
2666 #endif
2667 
2668 #ifdef CONFIG_VSX
2669 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2670 		switch (word & 7) {
2671 		case 0:		/* stfdp with LSB of DS field = 0 */
2672 		case 4:		/* stfdp with LSB of DS field = 1 */
2673 			op->ea = dsform_ea(word, regs);
2674 			op->type = MKOP(STORE_FP, 0, 16);
2675 			break;
2676 
2677 		case 1:		/* lxv */
2678 			op->ea = dqform_ea(word, regs);
2679 			if (word & 8)
2680 				op->reg = rd + 32;
2681 			op->type = MKOP(LOAD_VSX, 0, 16);
2682 			op->element_size = 16;
2683 			op->vsx_flags = VSX_CHECK_VEC;
2684 			break;
2685 
2686 		case 2:		/* stxsd with LSB of DS field = 0 */
2687 		case 6:		/* stxsd with LSB of DS field = 1 */
2688 			op->ea = dsform_ea(word, regs);
2689 			op->reg = rd + 32;
2690 			op->type = MKOP(STORE_VSX, 0, 8);
2691 			op->element_size = 8;
2692 			op->vsx_flags = VSX_CHECK_VEC;
2693 			break;
2694 
2695 		case 3:		/* stxssp with LSB of DS field = 0 */
2696 		case 7:		/* stxssp with LSB of DS field = 1 */
2697 			op->ea = dsform_ea(word, regs);
2698 			op->reg = rd + 32;
2699 			op->type = MKOP(STORE_VSX, 0, 4);
2700 			op->element_size = 8;
2701 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2702 			break;
2703 
2704 		case 5:		/* stxv */
2705 			op->ea = dqform_ea(word, regs);
2706 			if (word & 8)
2707 				op->reg = rd + 32;
2708 			op->type = MKOP(STORE_VSX, 0, 16);
2709 			op->element_size = 16;
2710 			op->vsx_flags = VSX_CHECK_VEC;
2711 			break;
2712 		}
2713 		break;
2714 #endif /* CONFIG_VSX */
2715 
2716 #ifdef __powerpc64__
2717 	case 62:	/* std[u] */
2718 		op->ea = dsform_ea(word, regs);
2719 		switch (word & 3) {
2720 		case 0:		/* std */
2721 			op->type = MKOP(STORE, 0, 8);
2722 			break;
2723 		case 1:		/* stdu */
2724 			op->type = MKOP(STORE, UPDATE, 8);
2725 			break;
2726 		case 2:		/* stq */
2727 			if (!(rd & 1))
2728 				op->type = MKOP(STORE, 0, 16);
2729 			break;
2730 		}
2731 		break;
2732 	case 1: /* Prefixed instructions */
2733 		prefix_r = GET_PREFIX_R(word);
2734 		ra = GET_PREFIX_RA(suffix);
2735 		op->update_reg = ra;
2736 		rd = (suffix >> 21) & 0x1f;
2737 		op->reg = rd;
2738 		op->val = regs->gpr[rd];
2739 
2740 		suffixopcode = get_op(suffix);
2741 		prefixtype = (word >> 24) & 0x3;
2742 		switch (prefixtype) {
2743 		case 0: /* Type 00  Eight-Byte Load/Store */
2744 			if (prefix_r && ra)
2745 				break;
2746 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2747 			switch (suffixopcode) {
2748 			case 41:	/* plwa */
2749 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
2750 				break;
2751 			case 42:        /* plxsd */
2752 				op->reg = rd + 32;
2753 				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
2754 				op->element_size = 8;
2755 				op->vsx_flags = VSX_CHECK_VEC;
2756 				break;
2757 			case 43:	/* plxssp */
2758 				op->reg = rd + 32;
2759 				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
2760 				op->element_size = 8;
2761 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2762 				break;
2763 			case 46:	/* pstxsd */
2764 				op->reg = rd + 32;
2765 				op->type = MKOP(STORE_VSX, PREFIXED, 8);
2766 				op->element_size = 8;
2767 				op->vsx_flags = VSX_CHECK_VEC;
2768 				break;
2769 			case 47:	/* pstxssp */
2770 				op->reg = rd + 32;
2771 				op->type = MKOP(STORE_VSX, PREFIXED, 4);
2772 				op->element_size = 8;
2773 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2774 				break;
2775 			case 51:	/* plxv1 */
2776 				op->reg += 32;
2777 				fallthrough;
2778 			case 50:	/* plxv0 */
2779 				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
2780 				op->element_size = 16;
2781 				op->vsx_flags = VSX_CHECK_VEC;
2782 				break;
2783 			case 55:	/* pstxv1 */
2784 				op->reg = rd + 32;
2785 				fallthrough;
2786 			case 54:	/* pstxv0 */
2787 				op->type = MKOP(STORE_VSX, PREFIXED, 16);
2788 				op->element_size = 16;
2789 				op->vsx_flags = VSX_CHECK_VEC;
2790 				break;
2791 			case 56:        /* plq */
2792 				op->type = MKOP(LOAD, PREFIXED, 16);
2793 				break;
2794 			case 57:	/* pld */
2795 				op->type = MKOP(LOAD, PREFIXED, 8);
2796 				break;
2797 			case 60:        /* stq */
2798 				op->type = MKOP(STORE, PREFIXED, 16);
2799 				break;
2800 			case 61:	/* pstd */
2801 				op->type = MKOP(STORE, PREFIXED, 8);
2802 				break;
2803 			}
2804 			break;
2805 		case 1: /* Type 01 Eight-Byte Register-to-Register */
2806 			break;
2807 		case 2: /* Type 10 Modified Load/Store */
2808 			if (prefix_r && ra)
2809 				break;
2810 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2811 			switch (suffixopcode) {
2812 			case 32:	/* plwz */
2813 				op->type = MKOP(LOAD, PREFIXED, 4);
2814 				break;
2815 			case 34:	/* plbz */
2816 				op->type = MKOP(LOAD, PREFIXED, 1);
2817 				break;
2818 			case 36:	/* pstw */
2819 				op->type = MKOP(STORE, PREFIXED, 4);
2820 				break;
2821 			case 38:	/* pstb */
2822 				op->type = MKOP(STORE, PREFIXED, 1);
2823 				break;
2824 			case 40:	/* plhz */
2825 				op->type = MKOP(LOAD, PREFIXED, 2);
2826 				break;
2827 			case 42:	/* plha */
2828 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
2829 				break;
2830 			case 44:	/* psth */
2831 				op->type = MKOP(STORE, PREFIXED, 2);
2832 				break;
2833 			case 48:        /* plfs */
2834 				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
2835 				break;
2836 			case 50:        /* plfd */
2837 				op->type = MKOP(LOAD_FP, PREFIXED, 8);
2838 				break;
2839 			case 52:        /* pstfs */
2840 				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
2841 				break;
2842 			case 54:        /* pstfd */
2843 				op->type = MKOP(STORE_FP, PREFIXED, 8);
2844 				break;
2845 			}
2846 			break;
2847 		case 3: /* Type 11 Modified Register-to-Register */
2848 			break;
2849 		}
2850 #endif /* __powerpc64__ */
2851 
2852 	}
2853 
2854 #ifdef CONFIG_VSX
2855 	if ((GETTYPE(op->type) == LOAD_VSX ||
2856 	     GETTYPE(op->type) == STORE_VSX) &&
2857 	    !cpu_has_feature(CPU_FTR_VSX)) {
2858 		return -1;
2859 	}
2860 #endif /* CONFIG_VSX */
2861 
2862 	return 0;
2863 
2864  logical_done:
2865 	if (word & 1)
2866 		set_cr0(regs, op);
2867  logical_done_nocc:
2868 	op->reg = ra;
2869 	op->type |= SETREG;
2870 	return 1;
2871 
2872  arith_done:
2873 	if (word & 1)
2874 		set_cr0(regs, op);
2875  compute_done:
2876 	op->reg = rd;
2877 	op->type |= SETREG;
2878 	return 1;
2879 
2880  priv:
2881 	op->type = INTERRUPT | 0x700;
2882 	op->val = SRR1_PROGPRIV;
2883 	return 0;
2884 
2885  trap:
2886 	op->type = INTERRUPT | 0x700;
2887 	op->val = SRR1_PROGTRAP;
2888 	return 0;
2889 }
2890 EXPORT_SYMBOL_GPL(analyse_instr);
2891 NOKPROBE_SYMBOL(analyse_instr);
2892 
2893 /*
2894  * For PPC32 we always use stwu with r1 to change the stack pointer.
2895  * So this emulated store may corrupt the exception frame, now we
2896  * have to provide the exception frame trampoline, which is pushed
2897  * below the kprobed function stack. So we only update gpr[1] but
2898  * don't emulate the real store operation. We will do real store
2899  * operation safely in exception return code by checking this flag.
2900  */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check if we will touch kernel stack overflow
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check if we already set since that means we'll
	 * lose the previous value.  The flag is consumed (and the real
	 * store performed) later, in the exception return path.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
2920 
2921 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2922 {
2923 	switch (size) {
2924 	case 2:
2925 		*valp = (signed short) *valp;
2926 		break;
2927 	case 4:
2928 		*valp = (signed int) *valp;
2929 		break;
2930 	}
2931 }
2932 
2933 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2934 {
2935 	switch (size) {
2936 	case 2:
2937 		*valp = byterev_2(*valp);
2938 		break;
2939 	case 4:
2940 		*valp = byterev_4(*valp);
2941 		break;
2942 #ifdef __powerpc64__
2943 	case 8:
2944 		*valp = byterev_8(*valp);
2945 		break;
2946 #endif
2947 	}
2948 }
2949 
2950 /*
2951  * Emulate an instruction that can be executed just by updating
2952  * fields in *regs.
2953  */
void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
	unsigned long next_pc;

	/* Default: fall through past this (possibly prefixed) instruction. */
	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
	switch (GETTYPE(op->type)) {
	case COMPUTE:
		/* Commit whichever results analyse_instr() flagged as set. */
		if (op->type & SETREG)
			regs->gpr[op->reg] = op->val;
		if (op->type & SETCC)
			regs->ccr = op->ccval;
		if (op->type & SETXER)
			regs->xer = op->xerval;
		break;

	case BRANCH:
		/*
		 * Order matters here: LR must receive the fall-through
		 * address before next_pc is replaced by the branch target.
		 */
		if (op->type & SETLK)
			regs->link = next_pc;
		if (op->type & BRTAKEN)
			next_pc = op->val;
		if (op->type & DECCTR)
			--regs->ctr;
		break;

	case BARRIER:
		/* Execute the real barrier on behalf of the stepped code. */
		switch (op->type & BARRIER_MASK) {
		case BARRIER_SYNC:
			mb();
			break;
		case BARRIER_ISYNC:
			isync();
			break;
		case BARRIER_EIEIO:
			eieio();
			break;
		case BARRIER_LWSYNC:
			asm volatile("lwsync" : : : "memory");
			break;
		case BARRIER_PTESYNC:
			asm volatile("ptesync" : : : "memory");
			break;
		}
		break;

	case MFSPR:
		/* Only XER/LR/CTR reach here; anything else is unexpected. */
		switch (op->spr) {
		case SPRN_XER:
			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->gpr[op->reg] = regs->link;
			break;
		case SPRN_CTR:
			regs->gpr[op->reg] = regs->ctr;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	case MTSPR:
		switch (op->spr) {
		case SPRN_XER:
			/* XER is architecturally 32 bits wide */
			regs->xer = op->val & 0xffffffffUL;
			break;
		case SPRN_LR:
			regs->link = op->val;
			break;
		case SPRN_CTR:
			regs->ctr = op->val;
			break;
		default:
			WARN_ON_ONCE(1);
		}
		break;

	default:
		WARN_ON_ONCE(1);
	}
	regs->nip = next_pc;
}
NOKPROBE_SYMBOL(emulate_update_regs);
3036 
3037 /*
3038  * Emulate a previously-analysed load or store instruction.
3039  * Return values are:
3040  * 0 = instruction emulated successfully
3041  * -EFAULT = address out of range or access faulted (regs->dar
3042  *	     contains the faulting address)
3043  * -EACCES = misaligned access, instruction requires alignment
3044  * -EINVAL = unknown operation in *op
3045  */
3046 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3047 {
3048 	int err, size, type;
3049 	int i, rd, nb;
3050 	unsigned int cr;
3051 	unsigned long val;
3052 	unsigned long ea;
3053 	bool cross_endian;
3054 
3055 	err = 0;
3056 	size = GETSIZE(op->type);
3057 	type = GETTYPE(op->type);
3058 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3059 	ea = truncate_if_32bit(regs->msr, op->ea);
3060 
3061 	switch (type) {
3062 	case LARX:
3063 		if (ea & (size - 1))
3064 			return -EACCES;		/* can't handle misaligned */
3065 		if (!address_ok(regs, ea, size))
3066 			return -EFAULT;
3067 		err = 0;
3068 		val = 0;
3069 		switch (size) {
3070 #ifdef __powerpc64__
3071 		case 1:
3072 			__get_user_asmx(val, ea, err, "lbarx");
3073 			break;
3074 		case 2:
3075 			__get_user_asmx(val, ea, err, "lharx");
3076 			break;
3077 #endif
3078 		case 4:
3079 			__get_user_asmx(val, ea, err, "lwarx");
3080 			break;
3081 #ifdef __powerpc64__
3082 		case 8:
3083 			__get_user_asmx(val, ea, err, "ldarx");
3084 			break;
3085 		case 16:
3086 			err = do_lqarx(ea, &regs->gpr[op->reg]);
3087 			break;
3088 #endif
3089 		default:
3090 			return -EINVAL;
3091 		}
3092 		if (err) {
3093 			regs->dar = ea;
3094 			break;
3095 		}
3096 		if (size < 16)
3097 			regs->gpr[op->reg] = val;
3098 		break;
3099 
3100 	case STCX:
3101 		if (ea & (size - 1))
3102 			return -EACCES;		/* can't handle misaligned */
3103 		if (!address_ok(regs, ea, size))
3104 			return -EFAULT;
3105 		err = 0;
3106 		switch (size) {
3107 #ifdef __powerpc64__
3108 		case 1:
3109 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3110 			break;
3111 		case 2:
3112 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3113 			break;
3114 #endif
3115 		case 4:
3116 			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3117 			break;
3118 #ifdef __powerpc64__
3119 		case 8:
3120 			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3121 			break;
3122 		case 16:
3123 			err = do_stqcx(ea, regs->gpr[op->reg],
3124 				       regs->gpr[op->reg + 1], &cr);
3125 			break;
3126 #endif
3127 		default:
3128 			return -EINVAL;
3129 		}
3130 		if (!err)
3131 			regs->ccr = (regs->ccr & 0x0fffffff) |
3132 				(cr & 0xe0000000) |
3133 				((regs->xer >> 3) & 0x10000000);
3134 		else
3135 			regs->dar = ea;
3136 		break;
3137 
3138 	case LOAD:
3139 #ifdef __powerpc64__
3140 		if (size == 16) {
3141 			err = emulate_lq(regs, ea, op->reg, cross_endian);
3142 			break;
3143 		}
3144 #endif
3145 		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3146 		if (!err) {
3147 			if (op->type & SIGNEXT)
3148 				do_signext(&regs->gpr[op->reg], size);
3149 			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3150 				do_byterev(&regs->gpr[op->reg], size);
3151 		}
3152 		break;
3153 
3154 #ifdef CONFIG_PPC_FPU
3155 	case LOAD_FP:
3156 		/*
3157 		 * If the instruction is in userspace, we can emulate it even
3158 		 * if the VMX state is not live, because we have the state
3159 		 * stored in the thread_struct.  If the instruction is in
3160 		 * the kernel, we must not touch the state in the thread_struct.
3161 		 */
3162 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3163 			return 0;
3164 		err = do_fp_load(op, ea, regs, cross_endian);
3165 		break;
3166 #endif
3167 #ifdef CONFIG_ALTIVEC
3168 	case LOAD_VMX:
3169 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3170 			return 0;
3171 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3172 		break;
3173 #endif
3174 #ifdef CONFIG_VSX
3175 	case LOAD_VSX: {
3176 		unsigned long msrbit = MSR_VSX;
3177 
3178 		/*
3179 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3180 		 * when the target of the instruction is a vector register.
3181 		 */
3182 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3183 			msrbit = MSR_VEC;
3184 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3185 			return 0;
3186 		err = do_vsx_load(op, ea, regs, cross_endian);
3187 		break;
3188 	}
3189 #endif
3190 	case LOAD_MULTI:
3191 		if (!address_ok(regs, ea, size))
3192 			return -EFAULT;
3193 		rd = op->reg;
3194 		for (i = 0; i < size; i += 4) {
3195 			unsigned int v32 = 0;
3196 
3197 			nb = size - i;
3198 			if (nb > 4)
3199 				nb = 4;
3200 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3201 			if (err)
3202 				break;
3203 			if (unlikely(cross_endian))
3204 				v32 = byterev_4(v32);
3205 			regs->gpr[rd] = v32;
3206 			ea += 4;
3207 			/* reg number wraps from 31 to 0 for lsw[ix] */
3208 			rd = (rd + 1) & 0x1f;
3209 		}
3210 		break;
3211 
3212 	case STORE:
3213 #ifdef __powerpc64__
3214 		if (size == 16) {
3215 			err = emulate_stq(regs, ea, op->reg, cross_endian);
3216 			break;
3217 		}
3218 #endif
3219 		if ((op->type & UPDATE) && size == sizeof(long) &&
3220 		    op->reg == 1 && op->update_reg == 1 &&
3221 		    !(regs->msr & MSR_PR) &&
3222 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3223 			err = handle_stack_update(ea, regs);
3224 			break;
3225 		}
3226 		if (unlikely(cross_endian))
3227 			do_byterev(&op->val, size);
3228 		err = write_mem(op->val, ea, size, regs);
3229 		break;
3230 
3231 #ifdef CONFIG_PPC_FPU
3232 	case STORE_FP:
3233 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3234 			return 0;
3235 		err = do_fp_store(op, ea, regs, cross_endian);
3236 		break;
3237 #endif
3238 #ifdef CONFIG_ALTIVEC
3239 	case STORE_VMX:
3240 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3241 			return 0;
3242 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3243 		break;
3244 #endif
3245 #ifdef CONFIG_VSX
3246 	case STORE_VSX: {
3247 		unsigned long msrbit = MSR_VSX;
3248 
3249 		/*
3250 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3251 		 * when the target of the instruction is a vector register.
3252 		 */
3253 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3254 			msrbit = MSR_VEC;
3255 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3256 			return 0;
3257 		err = do_vsx_store(op, ea, regs, cross_endian);
3258 		break;
3259 	}
3260 #endif
3261 	case STORE_MULTI:
3262 		if (!address_ok(regs, ea, size))
3263 			return -EFAULT;
3264 		rd = op->reg;
3265 		for (i = 0; i < size; i += 4) {
3266 			unsigned int v32 = regs->gpr[rd];
3267 
3268 			nb = size - i;
3269 			if (nb > 4)
3270 				nb = 4;
3271 			if (unlikely(cross_endian))
3272 				v32 = byterev_4(v32);
3273 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3274 			if (err)
3275 				break;
3276 			ea += 4;
3277 			/* reg number wraps from 31 to 0 for stsw[ix] */
3278 			rd = (rd + 1) & 0x1f;
3279 		}
3280 		break;
3281 
3282 	default:
3283 		return -EINVAL;
3284 	}
3285 
3286 	if (err)
3287 		return err;
3288 
3289 	if (op->type & UPDATE)
3290 		regs->gpr[op->update_reg] = op->ea;
3291 
3292 	return 0;
3293 }
3294 NOKPROBE_SYMBOL(emulate_loadstore);
3295 
3296 /*
3297  * Emulate instructions that cause a transfer of control,
3298  * loads and stores, and a few other instructions.
3299  * Returns 1 if the step was emulated, 0 if not,
3300  * or -1 if the instruction is one that should not be stepped,
3301  * such as an rfid, or a mtmsrd that would clear MSR_RI.
3302  */
int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
{
	struct instruction_op op;
	int r, err, type;
	unsigned long val;
	unsigned long ea;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return r;
	if (r > 0) {
		/* Fully decoded into register updates; just commit them. */
		emulate_update_regs(regs, &op);
		return 1;
	}

	err = 0;
	type = GETTYPE(op.type);

	if (OP_IS_LOAD_STORE(type)) {
		err = emulate_loadstore(regs, &op);
		if (err)
			return 0;
		goto instr_done;
	}

	switch (type) {
	case CACHEOP:
		ea = truncate_if_32bit(regs->msr, op.ea);
		if (!address_ok(regs, ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(ea, err, "dcbf");
			break;
		case DCBTST:
			/* only the basic form (hint field 0) is prefetched */
			if (op.reg == 0)
				prefetchw((void *) ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) ea);
			break;
		case ICBI:
			__cacheop_user_asmx(ea, err, "icbi");
			break;
		case DCBZ:
			err = emulate_dcbz(ea, regs);
			break;
		}
		if (err) {
			regs->dar = ea;
			return 0;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
				cpu_has_feature(CPU_FTR_REAL_LE) &&
				regs->gpr[0] == 0x1ebe) {
			/* magic r0 value: fast endian switch, not a syscall */
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

#ifdef CONFIG_PPC_BOOK3S_64
	case SYSCALL_VECTORED_0:	/* scv 0 */
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_vectored_emulate;
		regs->msr = MSR_KERNEL;
		return 1;
#endif

	case RFI:
		return -1;
#endif
	}
	return 0;

 instr_done:
	/* advance NIP past the emulated (possibly prefixed) instruction */
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);
3419