/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64-bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
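
/*
 * Example (illustrative): with MSR_64BIT clear in msr,
 * truncate_if_32bit(msr, 0x100000004UL) returns 0x4 - the upper 32 bits
 * of a computed effective address are discarded, just as 32-bit mode
 * hardware would do.
 */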

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
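
/*
 * Example (illustrative): for "bne cr1, target" the BO field is 0b00100
 * and BI is 6 (CR1's EQ bit, IBM numbering).  BO bit 0x4 is set, so the
 * CTR is not touched; BO bit 0x10 is clear, so CR bit 6 is tested and
 * the branch is taken when that bit differs from BO bit 3, i.e. when
 * EQ is 0.
 */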


static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
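
/*
 * Example (illustrative): for "lwz r3,8(r4)" the displacement is 8 and
 * ra is 4, so ea = GPR[4] + 8.  Note that ra == 0 means a base of zero,
 * not GPR[0].
 */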

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
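
/*
 * Example (illustrative): on a 64-bit kernel, max_align(0x1006) == 2
 * and max_align(0x1008) == 8, so an unaligned access is broken into the
 * largest naturally aligned pieces the address allows.
 */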


static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
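
/*
 * Example (illustrative): byterev_4(0x12345678) == 0x78563412.  These
 * helpers serve both the byte-reversed loads/stores (lwbrx and friends)
 * and the little-endian unaligned accesses done bytewise below.
 */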

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

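/*
 * The macros below follow the usual kernel exception-table pattern: the
 * access at label 1: gets an __ex_table entry pointing at the fixup
 * code at label 3:, which loads -EFAULT into the error variable and
 * branches back to label 2:.  A fault in the user access thus becomes
 * an error return instead of an oops.
 */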
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
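	/*
	 * The unsigned sum wraps around (i.e. produces a carry out) iff
	 * the result is less than one of the addends, or equal to it
	 * when there was a carry in.
	 */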
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

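/*
 * Evaluate a trap comparison as per the TO field encoding: 0x10 = less
 * than (signed), 0x08 = greater than (signed), 0x04 = equal, 0x02 =
 * less than (unsigned), 0x01 = greater than (unsigned).  The caller
 * ANDs the result with the instruction's TO bits.
 */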
static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
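
/*
 * Example (illustrative): MASK32(16, 23) == 0x0000ff00, a run of ones
 * from IBM bit 16 through bit 23.  On 64-bit, DATA32() replicates the
 * low word into the high word so that the full-width ROTATE() behaves
 * as a 32-bit rotate for the rlw* instructions.
 */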

/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
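			/*
			 * Bits 6-9 of the instruction hold the truth
			 * table of the CR operation: bit (6 + 2*a + b)
			 * is the result for source bit values a and b.
			 */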
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;

	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		break;	/* illegal instruction */

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */

	}
	return 0;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
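
/*
 * Example (illustrative): a caller that only needs the decode, not the
 * emulation, might use analyse_instr() like this (handle_load() is a
 * hypothetical handler, not part of this file):
 *
 *	struct instruction_op op;
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    (op.type & INSTR_TYPE_MASK) == LOAD)
 *		handle_load(&op);
 *
 * emulate_step() below performs the access as well as the decode.
 */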

/*
 * For PPC32 we always use stwu with r1 to change the stack pointer.
 * So this emulated store may corrupt the exception frame; to cope, an
 * exception frame trampoline is pushed below the kprobed function's
 * stack.  Here we only update gpr[1] and do not emulate the actual
 * store; the real store is done safely in the exception return code,
 * which checks this flag.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check whether this store would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Warn if the flag is already set, since that means the
	 * previous value would be lost.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;

	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;

	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;

	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;

 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}