xref: /openbmc/linux/arch/powerpc/lib/sstep.c (revision 8a26af30)
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64-bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
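
/*
 * Example (editorial, not in the original source): in 32-bit mode
 * (MSR_64BIT clear) effective addresses and the NIP wrap at 4GB, so
 * truncate_if_32bit(msr, 0x100000004UL) yields 0x4.
 */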

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
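
/*
 * Worked example (editorial): bdnz has BO=0b10000, so bit 2 (bo & 4) is
 * clear and the CTR is decremented; with BO bit 1 clear the branch is
 * taken while the decremented CTR is non-zero, and BO bit 4 (bo & 0x10)
 * being set skips the CR test.  BO=0b10100 (branch always) skips both
 * the CTR and the CR tests.
 */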


static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000) {	/* update forms */
			if ((instr >> 26) != 47)	/* stmw is not an update form */
				regs->gpr[ra] = ea;
		}
	}

	return truncate_if_32bit(regs->msr, ea);
}
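
/*
 * Example (editorial): for "lwz r3,8(r4)" the displacement field is 8
 * and RA=4, so ea = regs->gpr[4] + 8; with RA=0 the base register is
 * omitted and ea is just the sign-extended displacement.
 */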

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
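
/*
 * Example (editorial): on a 64-bit kernel max_align(0x1006) == 2 and
 * max_align(0x1008) == 8; OR-ing in sizeof(unsigned long) caps the
 * result at the word size, and x & -x keeps only the lowest set bit.
 */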


static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
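
/*
 * Example (editorial): byterev_4(0x12345678) == 0x78563412; byterev_8
 * swaps the two 32-bit halves and byte-reverses each, giving a full
 * 64-bit endian swap.
 */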

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
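
/*
 * Example (editorial): a 4-byte big-endian read at ea=0x1002 is done as
 * two 2-byte pieces (max_align(0x1002) == 2), each shifted into x from
 * the right, so the bytes land in big-endian order without a swap; on
 * little-endian the read is done bytewise and byte-reversed at the end.
 */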

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

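/*
 * Editorial note: the *_asmx macros below follow the usual kernel
 * extable pattern: label 1 marks the instruction that may fault, the
 * .fixup stub at label 3 loads -EFAULT into err and branches back to
 * label 2, and the __ex_table entry maps the faulting address (1b) to
 * the fixup (3b) so the page-fault handler can recover.
 * __put_user_asmx also reads the CR back with mfcr so the caller can
 * recover CR0 from a stwcx./stdcx.
 */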
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
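
/*
 * Editorial note: CR0 bits are LT=0x80000000, GT=0x40000000,
 * EQ=0x20000000 and SO=0x10000000; SO is copied from XER[SO]
 * (bit 0x80000000 of the XER, hence the >> 3 above).
 */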

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
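
/*
 * Example (editorial): unsigned wrap-around detects the carry-out, e.g.
 * in 32-bit mode 0xffffffff + 1 wraps to 0, and 0 < val1 sets XER[CA];
 * with carry_in set, val == val1 means val2 + carry also wrapped.
 */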

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
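
/*
 * Example (editorial): within a CR field the four bits are LT=8, GT=4,
 * EQ=2, SO=1, so "cmpd cr3,rA,rB" with rA < rB writes 0b1000|SO at
 * shift (7 - 3) * 4 = 16, i.e. bits 16-19 of the CCR image.
 */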

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
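
/*
 * Worked example (editorial): MASK32(mb, me) builds the IBM-numbered
 * mask with ones from bit mb through bit me, e.g. MASK32(24, 31) ==
 * 0x000000ff and MASK32(0, 7) == 0xff000000; the "+ ((me) >= (mb))"
 * correction term makes wrap-around masks (me < mb) come out right.
 * DATA32() doubles a 32-bit value into both halves of a 64-bit word so
 * that ROTATE()'s 64-bit rotate also implements the 32-bit rotate.
 */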

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra, val3;
	long ival;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400) ? regs->ctr : regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			return -1;

		case 150:	/* isync */
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
		break;
#endif

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000) ? 0x8002 : 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1UL << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1UL << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rd == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;

		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;

#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;

#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 37:	/* stwu */
		val = regs->gpr[rd];
		val3 = dform_ea(instr, regs);
		/*
		 * On PPC32, stwu with r1 is always how the stack pointer is
		 * changed, so this emulated store could corrupt the exception
		 * frame (the trampoline frame pushed below the kprobed
		 * function's stack).  In that case only update gpr[1] here
		 * and set TIF_EMULATE_STACK_STORE instead of emulating the
		 * store; the exception return code checks the flag and
		 * performs the real store safely.
		 */
		if ((ra == 1) && !(regs->msr & MSR_PR)
			&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
#ifdef CONFIG_PPC32
			/*
			 * Check if the store would overflow the kernel stack.
			 */
			if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
				printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
				err = -EINVAL;
				break;
			}
#endif /* CONFIG_PPC32 */
			/*
			 * Warn if the flag is already set, since that means
			 * we would lose the previously saved value.
			 */
			WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
			set_thread_flag(TIF_EMULATE_STACK_STORE);
			err = 0;
		} else
			err = write_mem(val, val3, 4, regs);
		goto ldst_done;

	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */

	}
	err = -EINVAL;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}
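
/*
 * Usage sketch (editorial, not part of this file): a single-step path
 * such as the kprobes handler typically tries emulation first and only
 * falls back to hardware single-stepping when emulate_step() returns 0;
 * -1 means the instruction (e.g. rfid) must not be stepped at all.
 *
 *	unsigned int insn = *(unsigned int *)probed_addr;
 *	int ret = emulate_step(regs, insn);
 *	if (ret > 0)
 *		return;			// emulated; regs->nip already advanced
 *	if (ret == 0)
 *		setup_single_step(regs);	// hypothetical fallback helper
 */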