/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
							unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
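
/*
 * For example, bdnz encodes BO = 0b10000: the 4 bit is clear, so the
 * CTR is decremented and the branch falls through only when CTR was 1
 * (i.e. is about to reach 0); the 0x10 bit is set, so no CR bit is
 * tested.
 */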

static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb, USER_DS))
		return 1;
	if (__access_ok(ea, 1, USER_DS))
		/* Access overlaps the end of the user region */
		regs->dar = USER_DS.seg;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
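
/*
 * For example, on 64-bit max_align(0x1006) == 2: OR-ing in
 * sizeof(unsigned long) caps the result at 8, and x & -x then isolates
 * the lowest set bit, i.e. the largest power of 2 that divides x.
 */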

static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}
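
/*
 * Note that the 16-byte case reverses the full quantity: each 8-byte
 * half is byte-reversed and the two halves are swapped.
 */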

static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}

/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}
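
/*
 * On BE the nb bytes are placed at the high-address end of the union,
 * so the value reads back right-justified in u.ul; on LE placing them
 * at offset 0 gives the same effect.
 */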

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}

/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);

#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64 */

#ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, 16);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);
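
/*
 * op->element_size selects how memory bytes map onto register lanes:
 * 16 for whole-vector forms, 8 for doubleword forms (lxvd2x and the
 * scalar loads), and 4/2/1 for lxvw4x/lxvh8x/lxvb16x respectively.
 */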

void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	emulate_vsx_load(op, &buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			load_vsrn(reg, &buf);
		} else {
			current->thread.fp_state.fpr[reg][0] = buf.d[0];
			current->thread.fp_state.fpr[reg][1] = buf.d[1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			load_vsrn(reg, &buf);
		else
			current->thread.vr_state.vr[reg - 32] = buf.v;
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			store_vsrn(reg, &buf);
		} else {
			buf.d[0] = current->thread.fp_state.fpr[reg][0];
			buf.d[1] = current->thread.fp_state.fpr[reg][1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			store_vsrn(reg, &buf);
		else
			buf.v = current->thread.vr_state.vr[reg - 32];
	}
	preempt_enable();
	emulate_vsx_store(op, &buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long i, size;

#ifdef __powerpc64__
	size = ppc64_caches.l1d.block_size;
	if (!(regs->msr & MSR_64BIT))
		ea &= 0xffffffffUL;
#else
	size = L1_CACHE_BYTES;
#endif
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;
	for (i = 0; i < size; i += sizeof(long)) {
		err = __put_user(0, (unsigned long __user *) (ea + i));
		if (err) {
			regs->dar = ea;
			return err;
		}
	}
	return 0;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op, int rd)
{
	long val = regs->gpr[rd];

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
				     struct instruction_op *op, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;
}
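
/*
 * Carry out of the addition is detected without needing a wider type:
 * val < val1 means the sum wrapped, and val == val1 with a carry-in
 * means val2 + carry itself wrapped to zero.
 */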

static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}
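
/*
 * Example: cmpb over 0x1122334455667788 and 0x1100330055007700 yields
 * 0xff00ff00ff00ff00; every byte that matches produces 0xff in the
 * corresponding byte of the result.
 */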

/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555;
	out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003f;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}
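
/*
 * This is the classic SWAR reduction: the first three steps leave each
 * byte holding its own population count (exactly what popcntb returns);
 * the byte sums are then accumulated per word (masked with 0x3f for
 * popcntw) and finally across the whole doubleword for popcntd.
 */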

#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */

/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {		/* prtyw */
		op->val = res & 0x0000000100000001;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
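
/*
 * The return value maps onto the TO field of tw/td: 0x10 = signed
 * less-than, 0x08 = signed greater-than, 0x04 = equal, and 0x02/0x01 =
 * unsigned less/greater.  A trap fires when TO & this value is
 * non-zero.
 */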

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
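
/*
 * MASK32(mb, me) builds the rlwinm-style mask of ones from bit mb
 * through bit me in IBM numbering (bit 0 = MSB); e.g. MASK32(24, 31)
 * is 0xff, and when me < mb the ones wrap around both ends of the
 * word.  ROTATE() special-cases n == 0 because shifting by the full
 * word size would be undefined behaviour in C.
 */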

/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (instr & 1)
			op->type |= SETLK;
		if (branch_taken(instr, regs, op))
			op->type |= BRTAKEN;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH | BRTAKEN;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (instr & 1)
			op->type |= SETLK;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			op->type = COMPUTE + SETCC;
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			return 1;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				op->type |= SETLK;
			if (branch_taken(instr, regs, op))
				op->type |= BRTAKEN;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;
			return 1;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			return 1;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
				break;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;
				break;
			}
#endif
			return 1;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;
			return 1;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return -1;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		return 1;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		return 1;

	case 7:		/* mulli */
		op->val = regs->gpr[ra] * (short) instr;
		goto compute_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
		return 1;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
		return 1;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, op, val, imm, rd >> 2);
		return 1;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		return 1;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op, rd);
		return 1;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 19:
		if (((instr >> 1) & 0x1f) == 2) {
			/* addpcis */
			imm = (short) (instr & 0xffc1);	/* d0 + d2 fields */
			imm |= (instr >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
			goto compute_done;
		}
		op->type = UNKNOWN;
		return 0;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		op->val = regs->gpr[rd] | (unsigned short) instr;
		goto logical_done_nocc;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

	case 26:	/* xori */
		op->val = regs->gpr[rd] ^ (unsigned short) instr;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) instr;
		set_cr0(regs, op, ra);
		goto logical_done_nocc;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op, ra);
		goto logical_done_nocc;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
				break;
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				break;
			case 2:		/* rldic */
				val &= MASK64(mb, 63 - sh);
				break;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(val & imm);
			}
			op->val = val;
			goto logical_done;
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				op->val = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				op->val = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		op->type = UNKNOWN;	/* illegal instruction */
		return 0;

	case 31:
		/* isel occupies 32 minor opcodes */
		if (((instr >> 1) & 0x1f) == 15) {
			mb = (instr >> 6) & 0x1f; /* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;

			op->val = (val) ? val2 : regs->gpr[rb];
			goto compute_done;
		}

		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			return 1;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			return 1;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			imm = 0xffffffffUL;
			if ((instr >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh))
						break;
					imm >>= 4;
				}
			}
			op->val = regs->ccr & imm;
			goto compute_done;

		case 144:	/* mtcrf */
			op->type = COMPUTE + SETCC;
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			op->val = regs->ccr;
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					op->val = (op->val & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			return 1;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			op->type = MFSPR;
			op->reg = rd;
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			op->type = MTSPR;
			op->val = regs->gpr[rd];
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, op, val, val2, rd >> 2);
			return 1;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
			return 1;

		case 508: /* cmpb */
			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			op->val = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			op->val = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			op->val = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			op->val = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			op->val = __builtin_clzl(regs->gpr[rd]);
			goto logical_done;
#endif
		case 28:	/* and */
			op->val = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			op->val = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

		case 124:	/* nor */
			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#ifdef CONFIG_PPC64
		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
#endif
		case 284:	/* eqv */
			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			op->val = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 378:	/* popcntw */
			do_popcnt(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 412:	/* orc */
			op->val = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			op->val = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;
#ifdef CONFIG_PPC64
		case 506:	/* popcntd */
			do_popcnt(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#endif
		case 922:	/* extsh */
			op->val = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			op->val = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			op->val = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				op->val = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 792:	/* sraw */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> (sh < 32 ? sh : 31);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] << sh;
			else
				op->val = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 794:	/* srad */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> (sh < 64 ? sh : 63);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 1014:	/* dcbz */
			op->type = MKOP(CACHEOP, DCBZ, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

/*
 * Loads and stores.
 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;
	op->vsx_flags = 0;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);
			break;

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);
			break;

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);
			break;

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);
			break;

		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);
			break;

		case 182:	/* stqcx. */
			if (!(rd & 1))
				op->type = MKOP(STCX, 0, 16);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		/*
		 * Note: for the load/store vector element instructions,
		 * bits of the EA say which field of the VMX register to use.
		 */
		case 7:		/* lvebx */
			op->type = MKOP(LOAD_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 39:	/* lvehx */
			op->type = MKOP(LOAD_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 71:	/* lvewx */
			op->type = MKOP(LOAD_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 103:	/* lvx */
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;
			break;

		case 135:	/* stvebx */
			op->type = MKOP(STORE_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 167:	/* stvehx */
			op->type = MKOP(STORE_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 199:	/* stvewx */
			op->type = MKOP(STORE_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
			op->element_size = 16;
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			op->type = MKOP(STORE_FP, u | FPCONV, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			op->type = MKOP(STORE_FP, u, 8);
			break;

#ifdef __powerpc64__
		case 791:	/* lfdpx */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;

		case 855:	/* lfiwax */
			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
			break;

		case 887:	/* lfiwzx */
			op->type = MKOP(LOAD_FP, 0, 4);
			break;

		case 919:	/* stfdpx */
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 983:	/* stfiwx */
			op->type = MKOP(STORE_FP, 0, 4);
			break;
#endif /* __powerpc64 */
#endif /* CONFIG_PPC_FPU */

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 12:	/* lxsiwzx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 76:	/* lxsiwax */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
			op->element_size = 8;
			break;

		case 140:	/* stxsiwx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			break;

		case 268:	/* lxvx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 269:	/* lxvl */
		case 301: {	/* lxvll */
			int nb;
			op->reg = rd | ((instr & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(LOAD_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 332:	/* lxvdsx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			op->vsx_flags = VSX_SPLAT;
			break;

		case 364:	/* lxvwsx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 4;
			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
			break;

		case 396:	/* stxvx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 16);
			op->element_size = 16;
			op->vsx_flags = VSX_CHECK_VEC;
			break;

		case 397:	/* stxvl */
		case 429: {	/* stxvll */
			int nb;
			op->reg = rd | ((instr & 1) << 5);
			op->ea = ra ? regs->gpr[ra] : 0;
			nb = regs->gpr[rb] & 0xff;
			if (nb > 16)
				nb = 16;
			op->type = MKOP(STORE_VSX, 0, nb);
			op->element_size = 16;
			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
				VSX_CHECK_VEC;
			break;
		}
		case 524:	/* lxsspx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 588:	/* lxsdx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, 0, 8);
			op->element_size = 8;
			break;

		case 652:	/* stxsspx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 4);
			op->element_size = 8;
			op->vsx_flags = VSX_FPCONV;
			break;

		case 716:	/* stxsdx */
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, 0, 8);
			op->element_size = 8;
2242 			break;
2243 
2244 		case 780:	/* lxvw4x */
2245 			op->reg = rd | ((instr & 1) << 5);
2246 			op->type = MKOP(LOAD_VSX, 0, 16);
2247 			op->element_size = 4;
2248 			break;
2249 
2250 		case 781:	/* lxsibzx */
2251 			op->reg = rd | ((instr & 1) << 5);
2252 			op->type = MKOP(LOAD_VSX, 0, 1);
2253 			op->element_size = 8;
2254 			op->vsx_flags = VSX_CHECK_VEC;
2255 			break;
2256 
2257 		case 812:	/* lxvh8x */
2258 			op->reg = rd | ((instr & 1) << 5);
2259 			op->type = MKOP(LOAD_VSX, 0, 16);
2260 			op->element_size = 2;
2261 			op->vsx_flags = VSX_CHECK_VEC;
2262 			break;
2263 
2264 		case 813:	/* lxsihzx */
2265 			op->reg = rd | ((instr & 1) << 5);
2266 			op->type = MKOP(LOAD_VSX, 0, 2);
2267 			op->element_size = 8;
2268 			op->vsx_flags = VSX_CHECK_VEC;
2269 			break;
2270 
2271 		case 844:	/* lxvd2x */
2272 			op->reg = rd | ((instr & 1) << 5);
2273 			op->type = MKOP(LOAD_VSX, 0, 16);
2274 			op->element_size = 8;
2275 			break;
2276 
2277 		case 876:	/* lxvb16x */
2278 			op->reg = rd | ((instr & 1) << 5);
2279 			op->type = MKOP(LOAD_VSX, 0, 16);
2280 			op->element_size = 1;
2281 			op->vsx_flags = VSX_CHECK_VEC;
2282 			break;
2283 
2284 		case 908:	/* stxvw4x */
2285 			op->reg = rd | ((instr & 1) << 5);
2286 			op->type = MKOP(STORE_VSX, 0, 16);
2287 			op->element_size = 4;
2288 			break;
2289 
2290 		case 909:	/* stxsibx */
2291 			op->reg = rd | ((instr & 1) << 5);
2292 			op->type = MKOP(STORE_VSX, 0, 1);
2293 			op->element_size = 8;
2294 			op->vsx_flags = VSX_CHECK_VEC;
2295 			break;
2296 
2297 		case 940:	/* stxvh8x */
2298 			op->reg = rd | ((instr & 1) << 5);
2299 			op->type = MKOP(STORE_VSX, 0, 16);
2300 			op->element_size = 2;
2301 			op->vsx_flags = VSX_CHECK_VEC;
2302 			break;
2303 
2304 		case 941:	/* stxsihx */
2305 			op->reg = rd | ((instr & 1) << 5);
2306 			op->type = MKOP(STORE_VSX, 0, 2);
2307 			op->element_size = 8;
2308 			op->vsx_flags = VSX_CHECK_VEC;
2309 			break;
2310 
2311 		case 972:	/* stxvd2x */
2312 			op->reg = rd | ((instr & 1) << 5);
2313 			op->type = MKOP(STORE_VSX, 0, 16);
2314 			op->element_size = 8;
2315 			break;
2316 
2317 		case 1004:	/* stxvb16x */
2318 			op->reg = rd | ((instr & 1) << 5);
2319 			op->type = MKOP(STORE_VSX, 0, 16);
2320 			op->element_size = 1;
2321 			op->vsx_flags = VSX_CHECK_VEC;
2322 			break;
2323 
2324 #endif /* CONFIG_VSX */
2325 		}
2326 		break;
2327 
2328 	case 32:	/* lwz */
2329 	case 33:	/* lwzu */
2330 		op->type = MKOP(LOAD, u, 4);
2331 		op->ea = dform_ea(instr, regs);
2332 		break;
2333 
2334 	case 34:	/* lbz */
2335 	case 35:	/* lbzu */
2336 		op->type = MKOP(LOAD, u, 1);
2337 		op->ea = dform_ea(instr, regs);
2338 		break;
2339 
2340 	case 36:	/* stw */
2341 	case 37:	/* stwu */
2342 		op->type = MKOP(STORE, u, 4);
2343 		op->ea = dform_ea(instr, regs);
2344 		break;
2345 
2346 	case 38:	/* stb */
2347 	case 39:	/* stbu */
2348 		op->type = MKOP(STORE, u, 1);
2349 		op->ea = dform_ea(instr, regs);
2350 		break;
2351 
2352 	case 40:	/* lhz */
2353 	case 41:	/* lhzu */
2354 		op->type = MKOP(LOAD, u, 2);
2355 		op->ea = dform_ea(instr, regs);
2356 		break;
2357 
2358 	case 42:	/* lha */
2359 	case 43:	/* lhau */
2360 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2361 		op->ea = dform_ea(instr, regs);
2362 		break;
2363 
2364 	case 44:	/* sth */
2365 	case 45:	/* sthu */
2366 		op->type = MKOP(STORE, u, 2);
2367 		op->ea = dform_ea(instr, regs);
2368 		break;
2369 
2370 	case 46:	/* lmw */
2371 		if (ra >= rd)
2372 			break;		/* invalid form: ra is in the range to be loaded */
2373 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2374 		op->ea = dform_ea(instr, regs);
2375 		break;
2376 
2377 	case 47:	/* stmw */
2378 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2379 		op->ea = dform_ea(instr, regs);
2380 		break;
2381 
2382 #ifdef CONFIG_PPC_FPU
2383 	case 48:	/* lfs */
2384 	case 49:	/* lfsu */
2385 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2386 		op->ea = dform_ea(instr, regs);
2387 		break;
2388 
2389 	case 50:	/* lfd */
2390 	case 51:	/* lfdu */
2391 		op->type = MKOP(LOAD_FP, u, 8);
2392 		op->ea = dform_ea(instr, regs);
2393 		break;
2394 
2395 	case 52:	/* stfs */
2396 	case 53:	/* stfsu */
2397 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2398 		op->ea = dform_ea(instr, regs);
2399 		break;
2400 
2401 	case 54:	/* stfd */
2402 	case 55:	/* stfdu */
2403 		op->type = MKOP(STORE_FP, u, 8);
2404 		op->ea = dform_ea(instr, regs);
2405 		break;
2406 #endif
2407 
2408 #ifdef __powerpc64__
2409 	case 56:	/* lq */
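		/* lq is an invalid form if RTp is odd or RA equals RTp */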
2410 		if (!((rd & 1) || (rd == ra)))
2411 			op->type = MKOP(LOAD, 0, 16);
2412 		op->ea = dqform_ea(instr, regs);
2413 		break;
2414 #endif
2415 
2416 #ifdef CONFIG_VSX
2417 	case 57:	/* lfdp, lxsd, lxssp */
2418 		op->ea = dsform_ea(instr, regs);
2419 		switch (instr & 3) {
2420 		case 0:		/* lfdp */
2421 			if (rd & 1)
2422 				break;		/* reg must be even */
2423 			op->type = MKOP(LOAD_FP, 0, 16);
2424 			break;
2425 		case 2:		/* lxsd */
2426 			op->reg = rd + 32;
2427 			op->type = MKOP(LOAD_VSX, 0, 8);
2428 			op->element_size = 8;
2429 			op->vsx_flags = VSX_CHECK_VEC;
2430 			break;
2431 		case 3:		/* lxssp */
2432 			op->reg = rd + 32;
2433 			op->type = MKOP(LOAD_VSX, 0, 4);
2434 			op->element_size = 8;
2435 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2436 			break;
2437 		}
2438 		break;
2439 #endif /* CONFIG_VSX */
2440 
2441 #ifdef __powerpc64__
2442 	case 58:	/* ld[u], lwa */
2443 		op->ea = dsform_ea(instr, regs);
2444 		switch (instr & 3) {
2445 		case 0:		/* ld */
2446 			op->type = MKOP(LOAD, 0, 8);
2447 			break;
2448 		case 1:		/* ldu */
2449 			op->type = MKOP(LOAD, UPDATE, 8);
2450 			break;
2451 		case 2:		/* lwa */
2452 			op->type = MKOP(LOAD, SIGNEXT, 4);
2453 			break;
2454 		}
2455 		break;
2456 #endif
2457 
2458 #ifdef CONFIG_VSX
2459 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2460 		switch (instr & 7) {
2461 		case 0:		/* stfdp with LSB of DS field = 0 */
2462 		case 4:		/* stfdp with LSB of DS field = 1 */
2463 			op->ea = dsform_ea(instr, regs);
2464 			op->type = MKOP(STORE_FP, 0, 16);
2465 			break;
2466 
2467 		case 1:		/* lxv */
2468 			op->ea = dqform_ea(instr, regs);
2469 			if (instr & 8)
2470 				op->reg = rd + 32;
2471 			op->type = MKOP(LOAD_VSX, 0, 16);
2472 			op->element_size = 16;
2473 			op->vsx_flags = VSX_CHECK_VEC;
2474 			break;
2475 
2476 		case 2:		/* stxsd with LSB of DS field = 0 */
2477 		case 6:		/* stxsd with LSB of DS field = 1 */
2478 			op->ea = dsform_ea(instr, regs);
2479 			op->reg = rd + 32;
2480 			op->type = MKOP(STORE_VSX, 0, 8);
2481 			op->element_size = 8;
2482 			op->vsx_flags = VSX_CHECK_VEC;
2483 			break;
2484 
2485 		case 3:		/* stxssp with LSB of DS field = 0 */
2486 		case 7:		/* stxssp with LSB of DS field = 1 */
2487 			op->ea = dsform_ea(instr, regs);
2488 			op->reg = rd + 32;
2489 			op->type = MKOP(STORE_VSX, 0, 4);
2490 			op->element_size = 8;
2491 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2492 			break;
2493 
2494 		case 5:		/* stxv */
2495 			op->ea = dqform_ea(instr, regs);
2496 			if (instr & 8)
2497 				op->reg = rd + 32;
2498 			op->type = MKOP(STORE_VSX, 0, 16);
2499 			op->element_size = 16;
2500 			op->vsx_flags = VSX_CHECK_VEC;
2501 			break;
2502 		}
2503 		break;
2504 #endif /* CONFIG_VSX */
2505 
2506 #ifdef __powerpc64__
2507 	case 62:	/* std[u] */
2508 		op->ea = dsform_ea(instr, regs);
2509 		switch (instr & 3) {
2510 		case 0:		/* std */
2511 			op->type = MKOP(STORE, 0, 8);
2512 			break;
2513 		case 1:		/* stdu */
2514 			op->type = MKOP(STORE, UPDATE, 8);
2515 			break;
2516 		case 2:		/* stq */
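			/* stq is an invalid form if RSp is odd */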
2517 			if (!(rd & 1))
2518 				op->type = MKOP(STORE, 0, 16);
2519 			break;
2520 		}
2521 		break;
2522 #endif /* __powerpc64__ */
2523 
2524 	}
2525 	return 0;
2526 
2527  logical_done:
2528 	if (instr & 1)
2529 		set_cr0(regs, op, ra);
2530  logical_done_nocc:
2531 	op->reg = ra;
2532 	op->type |= SETREG;
2533 	return 1;
2534 
2535  arith_done:
2536 	if (instr & 1)
2537 		set_cr0(regs, op, rd);
2538  compute_done:
2539 	op->reg = rd;
2540 	op->type |= SETREG;
2541 	return 1;
2542 
2543  priv:
2544 	op->type = INTERRUPT | 0x700;
2545 	op->val = SRR1_PROGPRIV;
2546 	return 0;
2547 
2548  trap:
2549 	op->type = INTERRUPT | 0x700;
2550 	op->val = SRR1_PROGTRAP;
2551 	return 0;
2552 }
2553 EXPORT_SYMBOL_GPL(analyse_instr);
2554 NOKPROBE_SYMBOL(analyse_instr);
2555 
2556 /*
2557  * On PPC32 the stack pointer is always changed with stwu on r1, so
2558  * emulating the store directly could corrupt the exception frame.
2559  * Instead, an exception frame trampoline is pushed below the kprobed
2560  * function's stack, and we only update gpr[1] here without emulating
2561  * the real store.  The exception return code performs the real store
2562  * safely once it sees the TIF_EMULATE_STACK_STORE flag.
2563  */
2564 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2565 {
2566 #ifdef CONFIG_PPC32
2567 	/*
2568 	 * Check whether the store would overflow the kernel stack.
2569 	 */
2570 	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2571 		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2572 		return -EINVAL;
2573 	}
2574 #endif /* CONFIG_PPC32 */
2575 	/*
2576 	 * Warn if the flag is already set, since that means the
2577 	 * previous value would be lost.
2578 	 */
2579 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2580 	set_thread_flag(TIF_EMULATE_STACK_STORE);
2581 	return 0;
2582 }
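/*
 * For instance, a kprobe on a typical PPC32 function prologue will
 * single-step something like:
 *	stwu	r1,-16(r1)
 * Only gpr[1] is updated here; the back-chain word is written later,
 * by the exception return path, once TIF_EMULATE_STACK_STORE is seen.
 */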
2583 
2584 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2585 {
2586 	switch (size) {
2587 	case 2:
2588 		*valp = (signed short) *valp;
2589 		break;
2590 	case 4:
2591 		*valp = (signed int) *valp;
2592 		break;
2593 	}
2594 }
2595 
2596 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2597 {
2598 	switch (size) {
2599 	case 2:
2600 		*valp = byterev_2(*valp);
2601 		break;
2602 	case 4:
2603 		*valp = byterev_4(*valp);
2604 		break;
2605 #ifdef __powerpc64__
2606 	case 8:
2607 		*valp = byterev_8(*valp);
2608 		break;
2609 #endif
2610 	}
2611 }
2612 
2613 /*
2614  * Emulate an instruction that can be executed just by updating
2615  * fields in *regs.
2616  */
2617 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2618 {
2619 	unsigned long next_pc;
2620 
2621 	next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
2622 	switch (op->type & INSTR_TYPE_MASK) {
2623 	case COMPUTE:
2624 		if (op->type & SETREG)
2625 			regs->gpr[op->reg] = op->val;
2626 		if (op->type & SETCC)
2627 			regs->ccr = op->ccval;
2628 		if (op->type & SETXER)
2629 			regs->xer = op->xerval;
2630 		break;
2631 
2632 	case BRANCH:
2633 		if (op->type & SETLK)
2634 			regs->link = next_pc;
2635 		if (op->type & BRTAKEN)
2636 			next_pc = op->val;
2637 		if (op->type & DECCTR)
2638 			--regs->ctr;
2639 		break;
2640 
2641 	case BARRIER:
2642 		switch (op->type & BARRIER_MASK) {
2643 		case BARRIER_SYNC:
2644 			mb();
2645 			break;
2646 		case BARRIER_ISYNC:
2647 			isync();
2648 			break;
2649 		case BARRIER_EIEIO:
2650 			eieio();
2651 			break;
2652 		case BARRIER_LWSYNC:
2653 			asm volatile("lwsync" : : : "memory");
2654 			break;
2655 		case BARRIER_PTESYNC:
2656 			asm volatile("ptesync" : : : "memory");
2657 			break;
2658 		}
2659 		break;
2660 
2661 	case MFSPR:
2662 		switch (op->spr) {
2663 		case SPRN_XER:
2664 			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2665 			break;
2666 		case SPRN_LR:
2667 			regs->gpr[op->reg] = regs->link;
2668 			break;
2669 		case SPRN_CTR:
2670 			regs->gpr[op->reg] = regs->ctr;
2671 			break;
2672 		default:
2673 			WARN_ON_ONCE(1);
2674 		}
2675 		break;
2676 
2677 	case MTSPR:
2678 		switch (op->spr) {
2679 		case SPRN_XER:
2680 			regs->xer = op->val & 0xffffffffUL;
2681 			break;
2682 		case SPRN_LR:
2683 			regs->link = op->val;
2684 			break;
2685 		case SPRN_CTR:
2686 			regs->ctr = op->val;
2687 			break;
2688 		default:
2689 			WARN_ON_ONCE(1);
2690 		}
2691 		break;
2692 
2693 	default:
2694 		WARN_ON_ONCE(1);
2695 	}
2696 	regs->nip = next_pc;
2697 }
2698 
2699 /*
2700  * Emulate a previously-analysed load or store instruction.
2701  * Return values are:
2702  * 0 = instruction emulated successfully
2703  * -EFAULT = address out of range or access faulted (regs->dar
2704  *	     contains the faulting address)
2705  * -EACCES = misaligned access, instruction requires alignment
2706  * -EINVAL = unknown operation in *op
2707  */
2708 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
2709 {
2710 	int err, size, type;
2711 	int i, rd, nb;
2712 	unsigned int cr;
2713 	unsigned long val;
2714 	unsigned long ea;
2715 	bool cross_endian;
2716 
2717 	err = 0;
2718 	size = GETSIZE(op->type);
2719 	type = op->type & INSTR_TYPE_MASK;
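	/*
	 * The context being emulated may run with the opposite endianness
	 * to the kernel; if so, data needs byte-reversing on load/store.
	 */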
2720 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
2721 	ea = truncate_if_32bit(regs->msr, op->ea);
2722 
2723 	switch (type) {
2724 	case LARX:
2725 		if (ea & (size - 1))
2726 			return -EACCES;		/* can't handle misaligned */
2727 		if (!address_ok(regs, ea, size))
2728 			return -EFAULT;
2729 		err = 0;
2730 		val = 0;
2731 		switch (size) {
2732 #ifdef __powerpc64__
2733 		case 1:
2734 			__get_user_asmx(val, ea, err, "lbarx");
2735 			break;
2736 		case 2:
2737 			__get_user_asmx(val, ea, err, "lharx");
2738 			break;
2739 #endif
2740 		case 4:
2741 			__get_user_asmx(val, ea, err, "lwarx");
2742 			break;
2743 #ifdef __powerpc64__
2744 		case 8:
2745 			__get_user_asmx(val, ea, err, "ldarx");
2746 			break;
2747 		case 16:
2748 			err = do_lqarx(ea, &regs->gpr[op->reg]);
2749 			break;
2750 #endif
2751 		default:
2752 			return -EINVAL;
2753 		}
2754 		if (err) {
2755 			regs->dar = ea;
2756 			break;
2757 		}
2758 		if (size < 16)
2759 			regs->gpr[op->reg] = val;
2760 		break;
2761 
2762 	case STCX:
2763 		if (ea & (size - 1))
2764 			return -EACCES;		/* can't handle misaligned */
2765 		if (!address_ok(regs, ea, size))
2766 			return -EFAULT;
2767 		err = 0;
2768 		switch (size) {
2769 #ifdef __powerpc64__
2770 		case 1:
2771 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
2772 			break;
2773 		case 2:
2774 			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
2775 			break;
2776 #endif
2777 		case 4:
2778 			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
2779 			break;
2780 #ifdef __powerpc64__
2781 		case 8:
2782 			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
2783 			break;
2784 		case 16:
2785 			err = do_stqcx(ea, regs->gpr[op->reg],
2786 				       regs->gpr[op->reg + 1], &cr);
2787 			break;
2788 #endif
2789 		default:
2790 			return -EINVAL;
2791 		}
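		/*
		 * Update CR0: LT/GT/EQ come from the st[bhwdq]cx. result
		 * in cr, and SO is copied down from XER[SO].
		 */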
2792 		if (!err)
2793 			regs->ccr = (regs->ccr & 0x0fffffff) |
2794 				(cr & 0xe0000000) |
2795 				((regs->xer >> 3) & 0x10000000);
2796 		else
2797 			regs->dar = ea;
2798 		break;
2799 
2800 	case LOAD:
2801 #ifdef __powerpc64__
2802 		if (size == 16) {
2803 			err = emulate_lq(regs, ea, op->reg, cross_endian);
2804 			break;
2805 		}
2806 #endif
2807 		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
2808 		if (!err) {
2809 			if (op->type & SIGNEXT)
2810 				do_signext(&regs->gpr[op->reg], size);
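			/*
			 * Byte-reverse when exactly one of "byte-reversing
			 * instruction" and "cross-endian context" holds;
			 * the two together cancel out.
			 */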
2811 			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
2812 				do_byterev(&regs->gpr[op->reg], size);
2813 		}
2814 		break;
2815 
2816 #ifdef CONFIG_PPC_FPU
2817 	case LOAD_FP:
2818 		/*
2819 		 * If the instruction is in userspace, we can emulate it even
2820 		 * if the FP state is not live, because we have the state
2821 		 * stored in the thread_struct.  If the instruction is in
2822 		 * the kernel, we must not touch the state in the thread_struct.
2823 		 */
2824 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2825 			return 0;
2826 		err = do_fp_load(op, ea, regs, cross_endian);
2827 		break;
2828 #endif
2829 #ifdef CONFIG_ALTIVEC
2830 	case LOAD_VMX:
2831 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2832 			return 0;
2833 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
2834 		break;
2835 #endif
2836 #ifdef CONFIG_VSX
2837 	case LOAD_VSX: {
2838 		unsigned long msrbit = MSR_VSX;
2839 
2840 		/*
2841 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2842 		 * when the target of the instruction is a vector register.
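		 * (lxsd and lxssp, for instance, target VSRs 32-63.)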
2843 		 */
2844 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2845 			msrbit = MSR_VEC;
2846 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2847 			return 0;
2848 		err = do_vsx_load(op, ea, regs, cross_endian);
2849 		break;
2850 	}
2851 #endif
2852 	case LOAD_MULTI:
2853 		if (!address_ok(regs, ea, size))
2854 			return -EFAULT;
2855 		rd = op->reg;
2856 		for (i = 0; i < size; i += 4) {
2857 			unsigned int v32 = 0;
2858 
2859 			nb = size - i;
2860 			if (nb > 4)
2861 				nb = 4;
2862 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
2863 			if (err)
2864 				break;
2865 			if (unlikely(cross_endian))
2866 				v32 = byterev_4(v32);
2867 			regs->gpr[rd] = v32;
2868 			ea += 4;
2869 			/* reg number wraps from 31 to 0 for lsw[ix] */
2870 			rd = (rd + 1) & 0x1f;
2871 		}
2872 		break;
2873 
2874 	case STORE:
2875 #ifdef __powerpc64__
2876 		if (size == 16) {
2877 			err = emulate_stq(regs, ea, op->reg, cross_endian);
2878 			break;
2879 		}
2880 #endif
2881 		if ((op->type & UPDATE) && size == sizeof(long) &&
2882 		    op->reg == 1 && op->update_reg == 1 &&
2883 		    !(regs->msr & MSR_PR) &&
2884 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
2885 			err = handle_stack_update(ea, regs);
2886 			break;
2887 		}
2888 		if (unlikely(cross_endian))
2889 			do_byterev(&op->val, size);
2890 		err = write_mem(op->val, ea, size, regs);
2891 		break;
2892 
2893 #ifdef CONFIG_PPC_FPU
2894 	case STORE_FP:
2895 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2896 			return 0;
2897 		err = do_fp_store(op, ea, regs, cross_endian);
2898 		break;
2899 #endif
2900 #ifdef CONFIG_ALTIVEC
2901 	case STORE_VMX:
2902 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2903 			return 0;
2904 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
2905 		break;
2906 #endif
2907 #ifdef CONFIG_VSX
2908 	case STORE_VSX: {
2909 		unsigned long msrbit = MSR_VSX;
2910 
2911 		/*
2912 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2913 		 * when the target of the instruction is a vector register.
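		 * (stxsd and stxssp, for instance, target VSRs 32-63.)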
2914 		 */
2915 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2916 			msrbit = MSR_VEC;
2917 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2918 			return 0;
2919 		err = do_vsx_store(op, ea, regs, cross_endian);
2920 		break;
2921 	}
2922 #endif
2923 	case STORE_MULTI:
2924 		if (!address_ok(regs, ea, size))
2925 			return -EFAULT;
2926 		rd = op->reg;
2927 		for (i = 0; i < size; i += 4) {
2928 			unsigned int v32 = regs->gpr[rd];
2929 
2930 			nb = size - i;
2931 			if (nb > 4)
2932 				nb = 4;
2933 			if (unlikely(cross_endian))
2934 				v32 = byterev_4(v32);
2935 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
2936 			if (err)
2937 				break;
2938 			ea += 4;
2939 			/* reg number wraps from 31 to 0 for stsw[ix] */
2940 			rd = (rd + 1) & 0x1f;
2941 		}
2942 		break;
2943 
2944 	default:
2945 		return -EINVAL;
2946 	}
2947 
2948 	if (err)
2949 		return err;
2950 
2951 	if (op->type & UPDATE)
2952 		regs->gpr[op->update_reg] = op->ea;
2953 
2954 	return 0;
2955 }
2956 NOKPROBE_SYMBOL(emulate_loadstore);
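/*
 * Sketch of a hypothetical caller; emulate_step() below does this
 * for real:
 *
 *	struct instruction_op op;
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    OP_IS_LOAD_STORE(op.type & INSTR_TYPE_MASK))
 *		err = emulate_loadstore(regs, &op);
 */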
2957 
2958 /*
2959  * Emulate instructions that cause a transfer of control,
2960  * loads and stores, and a few other instructions.
2961  * Returns 1 if the step was emulated, 0 if not,
2962  * or -1 if the instruction is one that should not be stepped,
2963  * such as an rfid, or a mtmsrd that would clear MSR_RI.
2964  */
2965 int emulate_step(struct pt_regs *regs, unsigned int instr)
2966 {
2967 	struct instruction_op op;
2968 	int r, err, type;
2969 	unsigned long val;
2970 	unsigned long ea;
2971 
2972 	r = analyse_instr(&op, regs, instr);
2973 	if (r < 0)
2974 		return r;
2975 	if (r > 0) {
2976 		emulate_update_regs(regs, &op);
2977 		return 1;
2978 	}
2979 
2980 	err = 0;
2981 	type = op.type & INSTR_TYPE_MASK;
2982 
2983 	if (OP_IS_LOAD_STORE(type)) {
2984 		err = emulate_loadstore(regs, &op);
2985 		if (err)
2986 			return 0;
2987 		goto instr_done;
2988 	}
2989 
2990 	switch (type) {
2991 	case CACHEOP:
2992 		ea = truncate_if_32bit(regs->msr, op.ea);
2993 		if (!address_ok(regs, ea, 8))
2994 			return 0;
2995 		switch (op.type & CACHEOP_MASK) {
2996 		case DCBST:
2997 			__cacheop_user_asmx(ea, err, "dcbst");
2998 			break;
2999 		case DCBF:
3000 			__cacheop_user_asmx(ea, err, "dcbf");
3001 			break;
3002 		case DCBTST:
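			/* op.reg holds the TH field; only TH == 0 is acted on */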
3003 			if (op.reg == 0)
3004 				prefetchw((void *) ea);
3005 			break;
3006 		case DCBT:
3007 			if (op.reg == 0)
3008 				prefetch((void *) ea);
3009 			break;
3010 		case ICBI:
3011 			__cacheop_user_asmx(ea, err, "icbi");
3012 			break;
3013 		case DCBZ:
3014 			err = emulate_dcbz(ea, regs);
3015 			break;
3016 		}
3017 		if (err) {
3018 			regs->dar = ea;
3019 			return 0;
3020 		}
3021 		goto instr_done;
3022 
3023 	case MFMSR:
3024 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3025 		goto instr_done;
3026 
3027 	case MTMSR:
3028 		val = regs->gpr[op.reg];
3029 		if ((val & MSR_RI) == 0)
3030 			/* can't step mtmsr[d] that would clear MSR_RI */
3031 			return -1;
3032 		/* here op.val is the mask of bits to change */
3033 		regs->msr = (regs->msr & ~op.val) | (val & op.val);
3034 		goto instr_done;
3035 
3036 #ifdef CONFIG_PPC64
3037 	case SYSCALL:	/* sc */
3038 		/*
3039 		 * N.B. this uses knowledge about how the syscall
3040 		 * entry code works.  If that is changed, this will
3041 		 * need to be changed also.
3042 		 */
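		/*
		 * gpr[0] == 0x1ebe identifies the special "switch endian"
		 * syscall, which simply flips MSR_LE rather than entering
		 * the kernel.
		 */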
3043 		if (regs->gpr[0] == 0x1ebe &&
3044 		    cpu_has_feature(CPU_FTR_REAL_LE)) {
3045 			regs->msr ^= MSR_LE;
3046 			goto instr_done;
3047 		}
3048 		regs->gpr[9] = regs->gpr[13];
3049 		regs->gpr[10] = MSR_KERNEL;
3050 		regs->gpr[11] = regs->nip + 4;
3051 		regs->gpr[12] = regs->msr & MSR_MASK;
3052 		regs->gpr[13] = (unsigned long) get_paca();
3053 		regs->nip = (unsigned long) &system_call_common;
3054 		regs->msr = MSR_KERNEL;
3055 		return 1;
3056 
3057 	case RFI:
3058 		return -1;
3059 #endif
3060 	}
3061 	return 0;
3062 
3063  instr_done:
3064 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
3065 	return 1;
3066 }
3067 NOKPROBE_SYMBOL(emulate_step);
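/*
 * Sketch of a hypothetical caller, e.g. a kprobe single-step path
 * (execute_out_of_line() is made up for illustration):
 *
 *	if (emulate_step(regs, instr) <= 0)
 *		execute_out_of_line(p, regs);
 */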
3068