/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
							unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
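
/*
 * Illustrative example: with MSR_64BIT clear, a computed address of
 * 0x100000004 truncates to 0x00000004, matching the 32-bit mode
 * wrap-around semantics of effective address calculation.
 */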

/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
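
/*
 * Worked example: "bdnz" encodes BO = 0b10000, so (bo & 4) == 0 and the
 * CTR decrement is recorded via DECCTR; bit 1 of BO is 0, so the branch
 * is taken unless the decremented CTR would be zero, i.e. unless
 * regs->ctr == 1 on entry.  BO bit 0x10 being set means no CR bit is
 * tested.
 */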

static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb, USER_DS))
		return 1;
	if (__access_ok(ea, 1, USER_DS))
		/* Access overlaps the end of the user region */
		regs->dar = USER_DS.seg;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
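
/*
 * e.g. max_align(0x1002) = 2, max_align(0x1004) = 4 and
 * max_align(0x1008) = 8 on 64-bit: OR-ing in sizeof(unsigned long)
 * caps the isolated low set bit at the word size.
 */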

static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
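
/*
 * e.g. byterev_4(0x11223344) = 0x44332211; byterev_8 swaps the two
 * 32-bit halves and byte-reverses each, giving the full 64-bit swap.
 */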

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}

static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}

/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}

/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);

#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64__ */

#ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, 16);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);

void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	emulate_vsx_load(op, &buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			load_vsrn(reg, &buf);
		} else {
			current->thread.fp_state.fpr[reg][0] = buf.d[0];
			current->thread.fp_state.fpr[reg][1] = buf.d[1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			load_vsrn(reg, &buf);
		else
			current->thread.vr_state.vr[reg - 32] = buf.v;
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	u8 mem[16];
	union vsx_reg buf;
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			store_vsrn(reg, &buf);
		} else {
			buf.d[0] = current->thread.fp_state.fpr[reg][0];
			buf.d[1] = current->thread.fp_state.fpr[reg][1];
		}
	} else {
		if (regs->msr & MSR_VEC)
			store_vsrn(reg, &buf);
		else
			buf.v = current->thread.vr_state.vr[reg - 32];
	}
	preempt_enable();
	emulate_vsx_store(op, &buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long i, size;

#ifdef __powerpc64__
	size = ppc64_caches.l1d.block_size;
	if (!(regs->msr & MSR_64BIT))
		ea &= 0xffffffffUL;
#else
	size = L1_CACHE_BYTES;
#endif
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;
	for (i = 0; i < size; i += sizeof(long)) {
		err = __put_user(0, (unsigned long __user *) (ea + i));
		if (err) {
			regs->dar = ea;
			return err;
		}
	}
	return 0;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
				     struct instruction_op *op, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;
}
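
/*
 * The carry-out test relies on modular arithmetic: for val = val1 +
 * val2 (+ carry_in), an unsigned overflow occurred iff val < val1, or
 * iff val == val1 when a carry was added and val2 was all-ones.
 */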

static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
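
/*
 * Each 4-bit CR field is LT, GT, EQ, SO from most to least significant
 * bit; e.g. a "cmpd cr3,rA,rB" with rA < rB sets crval = 8 (LT) and
 * shifts it into bits 16-19 of the CR image.
 */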

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}
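
/*
 * e.g. a 64-bit cmpb of 0x1122334455667788 and 0x11aa33bb55cc77dd
 * yields 0xff00ff00ff00ff00: each result byte is all-ones where the
 * operand bytes match and all-zeroes where they differ.
 */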

/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555;
	out = (0x3333333333333333 & out) + (0x3333333333333333 & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0f;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003f;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}
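
/*
 * The three unconditional steps above are the classic SWAR reduction,
 * leaving a per-byte population count in each byte of "out".  e.g.
 * v1 = 0xff00ff00ff00ff00 gives out = 0x0800080008000800, which the
 * popcntw/popcntd folds then sum to 0x0000001000000010 and 32.
 */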

#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */
/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {		/* prtyw */
		op->val = res & 0x0000000100000001;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
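
/*
 * The result mirrors the TO field encoding of tw/td: 0x10 = less than,
 * 0x08 = greater than, 0x04 = equal, 0x02/0x01 = logically less/greater
 * than.  A trap fires when (TO & result) is non-zero.
 */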

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
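
/*
 * Illustration: "rlwinm rD,rS,8,24,31" (a byte extract) uses
 * ROTATE(val, 8) on the DATA32-replicated value and MASK32(24, 31)
 * = 0x000000ff, so op->val becomes (rS rotated left 8) & 0xff.
 */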

/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (instr & 1)
			op->type |= SETLK;
		if (branch_taken(instr, regs, op))
			op->type |= BRTAKEN;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH | BRTAKEN;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (instr & 1)
			op->type |= SETLK;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			op->type = COMPUTE + SETCC;
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			return 1;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				op->type |= SETLK;
			if (branch_taken(instr, regs, op))
				op->type |= BRTAKEN;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;
			return 1;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			return 1;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
				break;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;
				break;
			}
#endif
			return 1;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;
			return 1;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return -1;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		return 1;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		return 1;

	case 7:		/* mulli */
		op->val = regs->gpr[ra] * (short) instr;
		goto compute_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
		return 1;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
		return 1;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, op, val, imm, rd >> 2);
		return 1;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		return 1;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op);
		return 1;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 19:
		if (((instr >> 1) & 0x1f) == 2) {
			/* addpcis */
			imm = (short) (instr & 0xffc1);	/* d0 + d2 fields */
			imm |= (instr >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
			goto compute_done;
		}
		op->type = UNKNOWN;
		return 0;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		op->val = regs->gpr[rd] | (unsigned short) instr;
		goto logical_done_nocc;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

	case 26:	/* xori */
		op->val = regs->gpr[rd] ^ (unsigned short) instr;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) instr;
		set_cr0(regs, op);
		goto logical_done_nocc;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op);
		goto logical_done_nocc;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
				break;
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				break;
			case 2:		/* rldic */
				val &= MASK64(mb, 63 - sh);
				break;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(val & imm);
			}
			op->val = val;
			goto logical_done;
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				op->val = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				op->val = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		op->type = UNKNOWN;	/* illegal instruction */
		return 0;

	case 31:
		/* isel occupies 32 minor opcodes */
		if (((instr >> 1) & 0x1f) == 15) {
			mb = (instr >> 6) & 0x1f; /* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;

			op->val = (val) ? val2 : regs->gpr[rb];
			goto compute_done;
		}

		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			return 1;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			return 1;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			imm = 0xffffffffUL;
			if ((instr >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh))
						break;
					imm >>= 4;
				}
			}
			op->val = regs->ccr & imm;
			goto compute_done;

		case 144:	/* mtcrf */
			op->type = COMPUTE + SETCC;
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			op->ccval = regs->ccr;
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					op->ccval = (op->ccval & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			return 1;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			op->type = MFSPR;
			op->reg = rd;
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			op->type = MTSPR;
			op->val = regs->gpr[rd];
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, op, val, val2, rd >> 2);
			return 1;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
			return 1;

		case 508: /* cmpb */
			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			op->val = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			op->val = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			op->val = (long)(int) regs->gpr[ra] *
				(int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			op->val = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;

/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			op->val = __builtin_clz((unsigned int) regs->gpr[rd]);
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			op->val = __builtin_clzl(regs->gpr[rd]);
			goto logical_done;
#endif
		case 28:	/* and */
			op->val = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			op->val = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

		case 124:	/* nor */
			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#ifdef CONFIG_PPC64
		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
#endif
		case 284:	/* eqv */
			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			op->val = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 378:	/* popcntw */
			do_popcnt(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 412:	/* orc */
			op->val = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			op->val = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;
#ifdef CONFIG_PPC64
		case 506:	/* popcntd */
			do_popcnt(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#endif
		case 922:	/* extsh */
			op->val = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			op->val = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			op->val = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				op->val = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 792:	/* sraw */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> (sh < 32 ? sh : 31);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] << sh;
			else
				op->val = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 794:	/* srad */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> (sh < 64 ? sh : 63);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 1014:	/* dcbz */
			op->type = MKOP(CACHEOP, DCBZ, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

/*
 * Loads and stores.
 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;
	op->vsx_flags = 0;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);
			break;

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);
			break;

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);
			break;

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);
			break;

		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);
			break;

		case 182:	/* stqcx. */
			if (!(rd & 1))
				op->type = MKOP(STCX, 0, 16);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		/*
		 * Note: for the load/store vector element instructions,
		 * bits of the EA say which field of the VMX register to use.
		 */
		case 7:		/* lvebx */
			op->type = MKOP(LOAD_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 39:	/* lvehx */
			op->type = MKOP(LOAD_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 71:	/* lvewx */
			op->type = MKOP(LOAD_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 103:	/* lvx */
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;
			break;

		case 135:	/* stvebx */
			op->type = MKOP(STORE_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 167:	/* stvehx */
			op->type = MKOP(STORE_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 199:	/* stvewx */
			op->type = MKOP(STORE_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			op->type = MKOP(STORE_FP, u | FPCONV, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			op->type = MKOP(STORE_FP, u, 8);
			break;

#ifdef __powerpc64__
		case 791:	/* lfdpx */
			op->type = MKOP(LOAD_FP, 0, 16);
			break;

		case 855:	/* lfiwax */
			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
			break;

		case 887:	/* lfiwzx */
			op->type = MKOP(LOAD_FP, 0, 4);
			break;

		case 919:	/* stfdpx */
			op->type = MKOP(STORE_FP, 0, 16);
			break;

		case 983:	/* stfiwx */
			op->type = MKOP(STORE_FP, 0, 4);
			break;
#endif /* __powerpc64__ */
2110 #endif /* CONFIG_PPC_FPU */
2111 
2112 #ifdef __powerpc64__
2113 		case 660:	/* stdbrx */
2114 			op->type = MKOP(STORE, BYTEREV, 8);
2115 			op->val = byterev_8(regs->gpr[rd]);
2116 			break;
2117 
2118 #endif
2119 		case 661:	/* stswx */
2120 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2121 			break;
2122 
2123 		case 662:	/* stwbrx */
2124 			op->type = MKOP(STORE, BYTEREV, 4);
2125 			op->val = byterev_4(regs->gpr[rd]);
2126 			break;
2127 
2128 		case 725:	/* stswi */
2129 			if (rb == 0)
2130 				rb = 32;	/* # bytes to store */
2131 			op->type = MKOP(STORE_MULTI, 0, rb);
2132 			op->ea = ra ? regs->gpr[ra] : 0;
2133 			break;
2134 
2135 		case 790:	/* lhbrx */
2136 			op->type = MKOP(LOAD, BYTEREV, 2);
2137 			break;
2138 
2139 		case 918:	/* sthbrx */
2140 			op->type = MKOP(STORE, BYTEREV, 2);
2141 			op->val = byterev_2(regs->gpr[rd]);
2142 			break;
2143 
2144 #ifdef CONFIG_VSX
2145 		case 12:	/* lxsiwzx */
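			/* the instruction's low bit supplies bit 5 of the VSX register number */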
2146 			op->reg = rd | ((instr & 1) << 5);
2147 			op->type = MKOP(LOAD_VSX, 0, 4);
2148 			op->element_size = 8;
2149 			break;
2150 
2151 		case 76:	/* lxsiwax */
2152 			op->reg = rd | ((instr & 1) << 5);
2153 			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2154 			op->element_size = 8;
2155 			break;
2156 
2157 		case 140:	/* stxsiwx */
2158 			op->reg = rd | ((instr & 1) << 5);
2159 			op->type = MKOP(STORE_VSX, 0, 4);
2160 			op->element_size = 8;
2161 			break;
2162 
2163 		case 268:	/* lxvx */
2164 			op->reg = rd | ((instr & 1) << 5);
2165 			op->type = MKOP(LOAD_VSX, 0, 16);
2166 			op->element_size = 16;
2167 			op->vsx_flags = VSX_CHECK_VEC;
2168 			break;
2169 
2170 		case 269:	/* lxvl */
2171 		case 301: {	/* lxvll */
2172 			int nb;
2173 			op->reg = rd | ((instr & 1) << 5);
2174 			op->ea = ra ? regs->gpr[ra] : 0;
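			/* the byte count comes from RB, capped at one 16-byte vector */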
2175 			nb = regs->gpr[rb] & 0xff;
2176 			if (nb > 16)
2177 				nb = 16;
2178 			op->type = MKOP(LOAD_VSX, 0, nb);
2179 			op->element_size = 16;
2180 			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2181 				VSX_CHECK_VEC;
2182 			break;
2183 		}
2184 		case 332:	/* lxvdsx */
2185 			op->reg = rd | ((instr & 1) << 5);
2186 			op->type = MKOP(LOAD_VSX, 0, 8);
2187 			op->element_size = 8;
2188 			op->vsx_flags = VSX_SPLAT;
2189 			break;
2190 
2191 		case 364:	/* lxvwsx */
2192 			op->reg = rd | ((instr & 1) << 5);
2193 			op->type = MKOP(LOAD_VSX, 0, 4);
2194 			op->element_size = 4;
2195 			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2196 			break;
2197 
2198 		case 396:	/* stxvx */
2199 			op->reg = rd | ((instr & 1) << 5);
2200 			op->type = MKOP(STORE_VSX, 0, 16);
2201 			op->element_size = 16;
2202 			op->vsx_flags = VSX_CHECK_VEC;
2203 			break;
2204 
2205 		case 397:	/* stxvl */
2206 		case 429: {	/* stxvll */
2207 			int nb;
2208 			op->reg = rd | ((instr & 1) << 5);
2209 			op->ea = ra ? regs->gpr[ra] : 0;
2210 			nb = regs->gpr[rb] & 0xff;
2211 			if (nb > 16)
2212 				nb = 16;
2213 			op->type = MKOP(STORE_VSX, 0, nb);
2214 			op->element_size = 16;
2215 			op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
2216 				VSX_CHECK_VEC;
2217 			break;
2218 		}
2219 		case 524:	/* lxsspx */
2220 			op->reg = rd | ((instr & 1) << 5);
2221 			op->type = MKOP(LOAD_VSX, 0, 4);
2222 			op->element_size = 8;
2223 			op->vsx_flags = VSX_FPCONV;
2224 			break;
2225 
2226 		case 588:	/* lxsdx */
2227 			op->reg = rd | ((instr & 1) << 5);
2228 			op->type = MKOP(LOAD_VSX, 0, 8);
2229 			op->element_size = 8;
2230 			break;
2231 
2232 		case 652:	/* stxsspx */
2233 			op->reg = rd | ((instr & 1) << 5);
2234 			op->type = MKOP(STORE_VSX, 0, 4);
2235 			op->element_size = 8;
2236 			op->vsx_flags = VSX_FPCONV;
2237 			break;
2238 
2239 		case 716:	/* stxsdx */
2240 			op->reg = rd | ((instr & 1) << 5);
2241 			op->type = MKOP(STORE_VSX, 0, 8);
2242 			op->element_size = 8;
2243 			break;
2244 
2245 		case 780:	/* lxvw4x */
2246 			op->reg = rd | ((instr & 1) << 5);
2247 			op->type = MKOP(LOAD_VSX, 0, 16);
2248 			op->element_size = 4;
2249 			break;
2250 
2251 		case 781:	/* lxsibzx */
2252 			op->reg = rd | ((instr & 1) << 5);
2253 			op->type = MKOP(LOAD_VSX, 0, 1);
2254 			op->element_size = 8;
2255 			op->vsx_flags = VSX_CHECK_VEC;
2256 			break;
2257 
2258 		case 812:	/* lxvh8x */
2259 			op->reg = rd | ((instr & 1) << 5);
2260 			op->type = MKOP(LOAD_VSX, 0, 16);
2261 			op->element_size = 2;
2262 			op->vsx_flags = VSX_CHECK_VEC;
2263 			break;
2264 
2265 		case 813:	/* lxsihzx */
2266 			op->reg = rd | ((instr & 1) << 5);
2267 			op->type = MKOP(LOAD_VSX, 0, 2);
2268 			op->element_size = 8;
2269 			op->vsx_flags = VSX_CHECK_VEC;
2270 			break;
2271 
2272 		case 844:	/* lxvd2x */
2273 			op->reg = rd | ((instr & 1) << 5);
2274 			op->type = MKOP(LOAD_VSX, 0, 16);
2275 			op->element_size = 8;
2276 			break;
2277 
2278 		case 876:	/* lxvb16x */
2279 			op->reg = rd | ((instr & 1) << 5);
2280 			op->type = MKOP(LOAD_VSX, 0, 16);
2281 			op->element_size = 1;
2282 			op->vsx_flags = VSX_CHECK_VEC;
2283 			break;
2284 
2285 		case 908:	/* stxvw4x */
2286 			op->reg = rd | ((instr & 1) << 5);
2287 			op->type = MKOP(STORE_VSX, 0, 16);
2288 			op->element_size = 4;
2289 			break;
2290 
2291 		case 909:	/* stxsibx */
2292 			op->reg = rd | ((instr & 1) << 5);
2293 			op->type = MKOP(STORE_VSX, 0, 1);
2294 			op->element_size = 8;
2295 			op->vsx_flags = VSX_CHECK_VEC;
2296 			break;
2297 
2298 		case 940:	/* stxvh8x */
2299 			op->reg = rd | ((instr & 1) << 5);
2300 			op->type = MKOP(STORE_VSX, 0, 16);
2301 			op->element_size = 2;
2302 			op->vsx_flags = VSX_CHECK_VEC;
2303 			break;
2304 
2305 		case 941:	/* stxsihx */
2306 			op->reg = rd | ((instr & 1) << 5);
2307 			op->type = MKOP(STORE_VSX, 0, 2);
2308 			op->element_size = 8;
2309 			op->vsx_flags = VSX_CHECK_VEC;
2310 			break;
2311 
2312 		case 972:	/* stxvd2x */
2313 			op->reg = rd | ((instr & 1) << 5);
2314 			op->type = MKOP(STORE_VSX, 0, 16);
2315 			op->element_size = 8;
2316 			break;
2317 
2318 		case 1004:	/* stxvb16x */
2319 			op->reg = rd | ((instr & 1) << 5);
2320 			op->type = MKOP(STORE_VSX, 0, 16);
2321 			op->element_size = 1;
2322 			op->vsx_flags = VSX_CHECK_VEC;
2323 			break;
2324 
2325 #endif /* CONFIG_VSX */
2326 		}
2327 		break;
2328 
2329 	case 32:	/* lwz */
2330 	case 33:	/* lwzu */
2331 		op->type = MKOP(LOAD, u, 4);
2332 		op->ea = dform_ea(instr, regs);
2333 		break;
2334 
2335 	case 34:	/* lbz */
2336 	case 35:	/* lbzu */
2337 		op->type = MKOP(LOAD, u, 1);
2338 		op->ea = dform_ea(instr, regs);
2339 		break;
2340 
2341 	case 36:	/* stw */
2342 	case 37:	/* stwu */
2343 		op->type = MKOP(STORE, u, 4);
2344 		op->ea = dform_ea(instr, regs);
2345 		break;
2346 
2347 	case 38:	/* stb */
2348 	case 39:	/* stbu */
2349 		op->type = MKOP(STORE, u, 1);
2350 		op->ea = dform_ea(instr, regs);
2351 		break;
2352 
2353 	case 40:	/* lhz */
2354 	case 41:	/* lhzu */
2355 		op->type = MKOP(LOAD, u, 2);
2356 		op->ea = dform_ea(instr, regs);
2357 		break;
2358 
2359 	case 42:	/* lha */
2360 	case 43:	/* lhau */
2361 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2362 		op->ea = dform_ea(instr, regs);
2363 		break;
2364 
2365 	case 44:	/* sth */
2366 	case 45:	/* sthu */
2367 		op->type = MKOP(STORE, u, 2);
2368 		op->ea = dform_ea(instr, regs);
2369 		break;
2370 
2371 	case 46:	/* lmw */
2372 		if (ra >= rd)
2373 			break;		/* invalid form: RA is in the range to be loaded */
2374 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2375 		op->ea = dform_ea(instr, regs);
2376 		break;
2377 
2378 	case 47:	/* stmw */
2379 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2380 		op->ea = dform_ea(instr, regs);
2381 		break;
2382 
2383 #ifdef CONFIG_PPC_FPU
2384 	case 48:	/* lfs */
2385 	case 49:	/* lfsu */
2386 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2387 		op->ea = dform_ea(instr, regs);
2388 		break;
2389 
2390 	case 50:	/* lfd */
2391 	case 51:	/* lfdu */
2392 		op->type = MKOP(LOAD_FP, u, 8);
2393 		op->ea = dform_ea(instr, regs);
2394 		break;
2395 
2396 	case 52:	/* stfs */
2397 	case 53:	/* stfsu */
2398 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2399 		op->ea = dform_ea(instr, regs);
2400 		break;
2401 
2402 	case 54:	/* stfd */
2403 	case 55:	/* stfdu */
2404 		op->type = MKOP(STORE_FP, u, 8);
2405 		op->ea = dform_ea(instr, regs);
2406 		break;
2407 #endif
2408 
2409 #ifdef __powerpc64__
2410 	case 56:	/* lq */
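		/* lq is an invalid form if RT is odd or RT == RA */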
2411 		if (!((rd & 1) || (rd == ra)))
2412 			op->type = MKOP(LOAD, 0, 16);
2413 		op->ea = dqform_ea(instr, regs);
2414 		break;
2415 #endif
2416 
2417 #ifdef CONFIG_VSX
2418 	case 57:	/* lfdp, lxsd, lxssp */
2419 		op->ea = dsform_ea(instr, regs);
2420 		switch (instr & 3) {
2421 		case 0:		/* lfdp */
2422 			if (rd & 1)
2423 				break;		/* reg must be even */
2424 			op->type = MKOP(LOAD_FP, 0, 16);
2425 			break;
2426 		case 2:		/* lxsd */
2427 			op->reg = rd + 32;
2428 			op->type = MKOP(LOAD_VSX, 0, 8);
2429 			op->element_size = 8;
2430 			op->vsx_flags = VSX_CHECK_VEC;
2431 			break;
2432 		case 3:		/* lxssp */
2433 			op->reg = rd + 32;
2434 			op->type = MKOP(LOAD_VSX, 0, 4);
2435 			op->element_size = 8;
2436 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2437 			break;
2438 		}
2439 		break;
2440 #endif /* CONFIG_VSX */
2441 
2442 #ifdef __powerpc64__
2443 	case 58:	/* ld[u], lwa */
2444 		op->ea = dsform_ea(instr, regs);
2445 		switch (instr & 3) {
2446 		case 0:		/* ld */
2447 			op->type = MKOP(LOAD, 0, 8);
2448 			break;
2449 		case 1:		/* ldu */
2450 			op->type = MKOP(LOAD, UPDATE, 8);
2451 			break;
2452 		case 2:		/* lwa */
2453 			op->type = MKOP(LOAD, SIGNEXT, 4);
2454 			break;
2455 		}
2456 		break;
2457 #endif
2458 
2459 #ifdef CONFIG_VSX
2460 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2461 		switch (instr & 7) {
2462 		case 0:		/* stfdp with LSB of DS field = 0 */
2463 		case 4:		/* stfdp with LSB of DS field = 1 */
2464 			op->ea = dsform_ea(instr, regs);
2465 			op->type = MKOP(STORE_FP, 0, 16);
2466 			break;
2467 
2468 		case 1:		/* lxv */
2469 			op->ea = dqform_ea(instr, regs);
2470 			if (instr & 8)
2471 				op->reg = rd + 32;
2472 			op->type = MKOP(LOAD_VSX, 0, 16);
2473 			op->element_size = 16;
2474 			op->vsx_flags = VSX_CHECK_VEC;
2475 			break;
2476 
2477 		case 2:		/* stxsd with LSB of DS field = 0 */
2478 		case 6:		/* stxsd with LSB of DS field = 1 */
2479 			op->ea = dsform_ea(instr, regs);
2480 			op->reg = rd + 32;
2481 			op->type = MKOP(STORE_VSX, 0, 8);
2482 			op->element_size = 8;
2483 			op->vsx_flags = VSX_CHECK_VEC;
2484 			break;
2485 
2486 		case 3:		/* stxssp with LSB of DS field = 0 */
2487 		case 7:		/* stxssp with LSB of DS field = 1 */
2488 			op->ea = dsform_ea(instr, regs);
2489 			op->reg = rd + 32;
2490 			op->type = MKOP(STORE_VSX, 0, 4);
2491 			op->element_size = 8;
2492 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2493 			break;
2494 
2495 		case 5:		/* stxv */
2496 			op->ea = dqform_ea(instr, regs);
2497 			if (instr & 8)
2498 				op->reg = rd + 32;
2499 			op->type = MKOP(STORE_VSX, 0, 16);
2500 			op->element_size = 16;
2501 			op->vsx_flags = VSX_CHECK_VEC;
2502 			break;
2503 		}
2504 		break;
2505 #endif /* CONFIG_VSX */
2506 
2507 #ifdef __powerpc64__
2508 	case 62:	/* std[u] */
2509 		op->ea = dsform_ea(instr, regs);
2510 		switch (instr & 3) {
2511 		case 0:		/* std */
2512 			op->type = MKOP(STORE, 0, 8);
2513 			break;
2514 		case 1:		/* stdu */
2515 			op->type = MKOP(STORE, UPDATE, 8);
2516 			break;
2517 		case 2:		/* stq */
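			/* stq is an invalid form if RS is odd */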
2518 			if (!(rd & 1))
2519 				op->type = MKOP(STORE, 0, 16);
2520 			break;
2521 		}
2522 		break;
2523 #endif /* __powerpc64__ */
2524 
2525 	}
2526 	return 0;
2527 
2528  logical_done:
2529 	if (instr & 1)
2530 		set_cr0(regs, op);
2531  logical_done_nocc:
2532 	op->reg = ra;
2533 	op->type |= SETREG;
2534 	return 1;
2535 
2536  arith_done:
2537 	if (instr & 1)
2538 		set_cr0(regs, op);
2539  compute_done:
2540 	op->reg = rd;
2541 	op->type |= SETREG;
2542 	return 1;
2543 
2544  priv:
2545 	op->type = INTERRUPT | 0x700;
2546 	op->val = SRR1_PROGPRIV;
2547 	return 0;
2548 
2549  trap:
2550 	op->type = INTERRUPT | 0x700;
2551 	op->val = SRR1_PROGTRAP;
2552 	return 0;
2553 }
2554 EXPORT_SYMBOL_GPL(analyse_instr);
2555 NOKPROBE_SYMBOL(analyse_instr);
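
/*
 * A minimal usage sketch for the helpers in this file, mirroring what
 * emulate_step() at the bottom of this file does:
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r > 0)		(fully emulated: update registers only)
 *		emulate_update_regs(regs, &op);
 *	else if (r == 0 && OP_IS_LOAD_STORE(op.type & INSTR_TYPE_MASK))
 *		r = emulate_loadstore(regs, &op);
 */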
2556 
2557 /*
2558  * On PPC32 the stack pointer is always changed with stwu on r1, so this
2559  * emulated store could corrupt the exception frame.  To avoid that, an
2560  * exception frame trampoline is pushed below the kprobed function's
2561  * stack; here we only update gpr[1] and skip the real store.  The store
2562  * is then performed safely by the exception return code, which checks
2563  * the TIF_EMULATE_STACK_STORE flag set here.
2564  */
2565 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
2566 {
2567 #ifdef CONFIG_PPC32
2568 	/*
2569 	 * Check whether the emulated store would overflow the kernel stack.
2570 	 */
2571 	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
2572 		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
2573 		return -EINVAL;
2574 	}
2575 #endif /* CONFIG_PPC32 */
2576 	/*
2577 	 * Warn if the flag is already set, since that means a previously
2578 	 * deferred store would be lost.
2579 	 */
2580 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
2581 	set_thread_flag(TIF_EMULATE_STACK_STORE);
2582 	return 0;
2583 }
2584 
2585 static nokprobe_inline void do_signext(unsigned long *valp, int size)
2586 {
2587 	switch (size) {
2588 	case 2:
2589 		*valp = (signed short) *valp;
2590 		break;
2591 	case 4:
2592 		*valp = (signed int) *valp;
2593 		break;
2594 	}
2595 }
2596 
2597 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
2598 {
2599 	switch (size) {
2600 	case 2:
2601 		*valp = byterev_2(*valp);
2602 		break;
2603 	case 4:
2604 		*valp = byterev_4(*valp);
2605 		break;
2606 #ifdef __powerpc64__
2607 	case 8:
2608 		*valp = byterev_8(*valp);
2609 		break;
2610 #endif
2611 	}
2612 }
2613 
2614 /*
2615  * Emulate an instruction that can be executed just by updating
2616  * fields in *regs.
2617  */
2618 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
2619 {
2620 	unsigned long next_pc;
2621 
2622 	next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
2623 	switch (op->type & INSTR_TYPE_MASK) {
2624 	case COMPUTE:
2625 		if (op->type & SETREG)
2626 			regs->gpr[op->reg] = op->val;
2627 		if (op->type & SETCC)
2628 			regs->ccr = op->ccval;
2629 		if (op->type & SETXER)
2630 			regs->xer = op->xerval;
2631 		break;
2632 
2633 	case BRANCH:
2634 		if (op->type & SETLK)
2635 			regs->link = next_pc;
2636 		if (op->type & BRTAKEN)
2637 			next_pc = op->val;
2638 		if (op->type & DECCTR)
2639 			--regs->ctr;
2640 		break;
2641 
2642 	case BARRIER:
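		/* execute the equivalent barrier instruction natively */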
2643 		switch (op->type & BARRIER_MASK) {
2644 		case BARRIER_SYNC:
2645 			mb();
2646 			break;
2647 		case BARRIER_ISYNC:
2648 			isync();
2649 			break;
2650 		case BARRIER_EIEIO:
2651 			eieio();
2652 			break;
2653 		case BARRIER_LWSYNC:
2654 			asm volatile("lwsync" : : : "memory");
2655 			break;
2656 		case BARRIER_PTESYNC:
2657 			asm volatile("ptesync" : : : "memory");
2658 			break;
2659 		}
2660 		break;
2661 
2662 	case MFSPR:
2663 		switch (op->spr) {
2664 		case SPRN_XER:
2665 			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
2666 			break;
2667 		case SPRN_LR:
2668 			regs->gpr[op->reg] = regs->link;
2669 			break;
2670 		case SPRN_CTR:
2671 			regs->gpr[op->reg] = regs->ctr;
2672 			break;
2673 		default:
2674 			WARN_ON_ONCE(1);
2675 		}
2676 		break;
2677 
2678 	case MTSPR:
2679 		switch (op->spr) {
2680 		case SPRN_XER:
2681 			regs->xer = op->val & 0xffffffffUL;
2682 			break;
2683 		case SPRN_LR:
2684 			regs->link = op->val;
2685 			break;
2686 		case SPRN_CTR:
2687 			regs->ctr = op->val;
2688 			break;
2689 		default:
2690 			WARN_ON_ONCE(1);
2691 		}
2692 		break;
2693 
2694 	default:
2695 		WARN_ON_ONCE(1);
2696 	}
2697 	regs->nip = next_pc;
2698 }
2699 
2700 /*
2701  * Emulate a previously-analysed load or store instruction.
2702  * Return values are:
2703  * 0 = instruction emulated successfully
2704  * -EFAULT = address out of range or access faulted (regs->dar
2705  *	     contains the faulting address)
2706  * -EACCES = misaligned access, instruction requires alignment
2707  * -EINVAL = unknown operation in *op
2708  */
2709 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
2710 {
2711 	int err, size, type;
2712 	int i, rd, nb;
2713 	unsigned int cr;
2714 	unsigned long val;
2715 	unsigned long ea;
2716 	bool cross_endian;
2717 
2718 	err = 0;
2719 	size = GETSIZE(op->type);
2720 	type = op->type & INSTR_TYPE_MASK;
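	/* true if the emulated context's endianness differs from the kernel's */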
2721 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
2722 	ea = truncate_if_32bit(regs->msr, op->ea);
2723 
2724 	switch (type) {
2725 	case LARX:
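		/*
		 * Do the load with a real load-and-reserve instruction so
		 * that a hardware reservation is established.
		 */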
2726 		if (ea & (size - 1))
2727 			return -EACCES;		/* can't handle misaligned */
2728 		if (!address_ok(regs, ea, size))
2729 			return -EFAULT;
2730 		err = 0;
2731 		val = 0;
2732 		switch (size) {
2733 #ifdef __powerpc64__
2734 		case 1:
2735 			__get_user_asmx(val, ea, err, "lbarx");
2736 			break;
2737 		case 2:
2738 			__get_user_asmx(val, ea, err, "lharx");
2739 			break;
2740 #endif
2741 		case 4:
2742 			__get_user_asmx(val, ea, err, "lwarx");
2743 			break;
2744 #ifdef __powerpc64__
2745 		case 8:
2746 			__get_user_asmx(val, ea, err, "ldarx");
2747 			break;
2748 		case 16:
2749 			err = do_lqarx(ea, &regs->gpr[op->reg]);
2750 			break;
2751 #endif
2752 		default:
2753 			return -EINVAL;
2754 		}
2755 		if (err) {
2756 			regs->dar = ea;
2757 			break;
2758 		}
2759 		if (size < 16)
2760 			regs->gpr[op->reg] = val;
2761 		break;
2762 
2763 	case STCX:
2764 		if (ea & (size - 1))
2765 			return -EACCES;		/* can't handle misaligned */
2766 		if (!address_ok(regs, ea, size))
2767 			return -EFAULT;
2768 		err = 0;
2769 		switch (size) {
2770 #ifdef __powerpc64__
2771 		case 1:
2772 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
2773 			break;
2774 		case 2:
2775 			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
2776 			break;
2777 #endif
2778 		case 4:
2779 			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
2780 			break;
2781 #ifdef __powerpc64__
2782 		case 8:
2783 			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
2784 			break;
2785 		case 16:
2786 			err = do_stqcx(ea, regs->gpr[op->reg],
2787 				       regs->gpr[op->reg + 1], &cr);
2788 			break;
2789 #endif
2790 		default:
2791 			return -EINVAL;
2792 		}
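		/* CR0 gets LT/GT/EQ from the conditional store, SO copied from XER */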
2793 		if (!err)
2794 			regs->ccr = (regs->ccr & 0x0fffffff) |
2795 				(cr & 0xe0000000) |
2796 				((regs->xer >> 3) & 0x10000000);
2797 		else
2798 			regs->dar = ea;
2799 		break;
2800 
2801 	case LOAD:
2802 #ifdef __powerpc64__
2803 		if (size == 16) {
2804 			err = emulate_lq(regs, ea, op->reg, cross_endian);
2805 			break;
2806 		}
2807 #endif
2808 		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
2809 		if (!err) {
2810 			if (op->type & SIGNEXT)
2811 				do_signext(&regs->gpr[op->reg], size);
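			/* swap when exactly one of BYTEREV and cross-endian applies */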
2812 			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
2813 				do_byterev(&regs->gpr[op->reg], size);
2814 		}
2815 		break;
2816 
2817 #ifdef CONFIG_PPC_FPU
2818 	case LOAD_FP:
2819 		/*
2820 		 * If the instruction is in userspace, we can emulate it even
2821 		 * if the FP/VMX/VSX state is not live, because we have the
2822 		 * state stored in the thread_struct.  If the instruction is
2823 		 * in the kernel, we must not touch the state in the thread_struct.
2824 		 */
2825 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2826 			return 0;
2827 		err = do_fp_load(op, ea, regs, cross_endian);
2828 		break;
2829 #endif
2830 #ifdef CONFIG_ALTIVEC
2831 	case LOAD_VMX:
2832 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2833 			return 0;
2834 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
2835 		break;
2836 #endif
2837 #ifdef CONFIG_VSX
2838 	case LOAD_VSX: {
2839 		unsigned long msrbit = MSR_VSX;
2840 
2841 		/*
2842 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2843 		 * when the target of the instruction is a vector register.
2844 		 */
2845 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2846 			msrbit = MSR_VEC;
2847 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2848 			return 0;
2849 		err = do_vsx_load(op, ea, regs, cross_endian);
2850 		break;
2851 	}
2852 #endif
2853 	case LOAD_MULTI:
2854 		if (!address_ok(regs, ea, size))
2855 			return -EFAULT;
2856 		rd = op->reg;
2857 		for (i = 0; i < size; i += 4) {
2858 			unsigned int v32 = 0;
2859 
2860 			nb = size - i;
2861 			if (nb > 4)
2862 				nb = 4;
2863 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
2864 			if (err)
2865 				break;
2866 			if (unlikely(cross_endian))
2867 				v32 = byterev_4(v32);
2868 			regs->gpr[rd] = v32;
2869 			ea += 4;
2870 			/* reg number wraps from 31 to 0 for lsw[ix] */
2871 			rd = (rd + 1) & 0x1f;
2872 		}
2873 		break;
2874 
2875 	case STORE:
2876 #ifdef __powerpc64__
2877 		if (size == 16) {
2878 			err = emulate_stq(regs, ea, op->reg, cross_endian);
2879 			break;
2880 		}
2881 #endif
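		/*
		 * A kernel-mode stwu/stdu updating r1 near the current stack
		 * frame is deferred; see handle_stack_update() above.
		 */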
2882 		if ((op->type & UPDATE) && size == sizeof(long) &&
2883 		    op->reg == 1 && op->update_reg == 1 &&
2884 		    !(regs->msr & MSR_PR) &&
2885 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
2886 			err = handle_stack_update(ea, regs);
2887 			break;
2888 		}
2889 		if (unlikely(cross_endian))
2890 			do_byterev(&op->val, size);
2891 		err = write_mem(op->val, ea, size, regs);
2892 		break;
2893 
2894 #ifdef CONFIG_PPC_FPU
2895 	case STORE_FP:
2896 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
2897 			return 0;
2898 		err = do_fp_store(op, ea, regs, cross_endian);
2899 		break;
2900 #endif
2901 #ifdef CONFIG_ALTIVEC
2902 	case STORE_VMX:
2903 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
2904 			return 0;
2905 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
2906 		break;
2907 #endif
2908 #ifdef CONFIG_VSX
2909 	case STORE_VSX: {
2910 		unsigned long msrbit = MSR_VSX;
2911 
2912 		/*
2913 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
2914 		 * when the target of the instruction is a vector register.
2915 		 */
2916 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
2917 			msrbit = MSR_VEC;
2918 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
2919 			return 0;
2920 		err = do_vsx_store(op, ea, regs, cross_endian);
2921 		break;
2922 	}
2923 #endif
2924 	case STORE_MULTI:
2925 		if (!address_ok(regs, ea, size))
2926 			return -EFAULT;
2927 		rd = op->reg;
2928 		for (i = 0; i < size; i += 4) {
2929 			unsigned int v32 = regs->gpr[rd];
2930 
2931 			nb = size - i;
2932 			if (nb > 4)
2933 				nb = 4;
2934 			if (unlikely(cross_endian))
2935 				v32 = byterev_4(v32);
2936 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
2937 			if (err)
2938 				break;
2939 			ea += 4;
2940 			/* reg number wraps from 31 to 0 for stsw[ix] */
2941 			rd = (rd + 1) & 0x1f;
2942 		}
2943 		break;
2944 
2945 	default:
2946 		return -EINVAL;
2947 	}
2948 
2949 	if (err)
2950 		return err;
2951 
2952 	if (op->type & UPDATE)
2953 		regs->gpr[op->update_reg] = op->ea;
2954 
2955 	return 0;
2956 }
2957 NOKPROBE_SYMBOL(emulate_loadstore);
2958 
2959 /*
2960  * Emulate instructions that cause a transfer of control,
2961  * loads and stores, and a few other instructions.
2962  * Returns 1 if the step was emulated, 0 if not,
2963  * or -1 if the instruction is one that should not be stepped,
2964  * such as an rfid, or a mtmsrd that would clear MSR_RI.
2965  */
2966 int emulate_step(struct pt_regs *regs, unsigned int instr)
2967 {
2968 	struct instruction_op op;
2969 	int r, err, type;
2970 	unsigned long val;
2971 	unsigned long ea;
2972 
2973 	r = analyse_instr(&op, regs, instr);
2974 	if (r < 0)
2975 		return r;
2976 	if (r > 0) {
2977 		emulate_update_regs(regs, &op);
2978 		return 1;
2979 	}
2980 
2981 	err = 0;
2982 	type = op.type & INSTR_TYPE_MASK;
2983 
2984 	if (OP_IS_LOAD_STORE(type)) {
2985 		err = emulate_loadstore(regs, &op);
2986 		if (err)
2987 			return 0;
2988 		goto instr_done;
2989 	}
2990 
2991 	switch (type) {
2992 	case CACHEOP:
2993 		ea = truncate_if_32bit(regs->msr, op.ea);
2994 		if (!address_ok(regs, ea, 8))
2995 			return 0;
2996 		switch (op.type & CACHEOP_MASK) {
2997 		case DCBST:
2998 			__cacheop_user_asmx(ea, err, "dcbst");
2999 			break;
3000 		case DCBF:
3001 			__cacheop_user_asmx(ea, err, "dcbf");
3002 			break;
3003 		case DCBTST:
3004 			if (op.reg == 0)
3005 				prefetchw((void *) ea);
3006 			break;
3007 		case DCBT:
3008 			if (op.reg == 0)
3009 				prefetch((void *) ea);
3010 			break;
3011 		case ICBI:
3012 			__cacheop_user_asmx(ea, err, "icbi");
3013 			break;
3014 		case DCBZ:
3015 			err = emulate_dcbz(ea, regs);
3016 			break;
3017 		}
3018 		if (err) {
3019 			regs->dar = ea;
3020 			return 0;
3021 		}
3022 		goto instr_done;
3023 
3024 	case MFMSR:
3025 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3026 		goto instr_done;
3027 
3028 	case MTMSR:
3029 		val = regs->gpr[op.reg];
3030 		if ((val & MSR_RI) == 0)
3031 			/* can't step mtmsr[d] that would clear MSR_RI */
3032 			return -1;
3033 		/* here op.val is the mask of bits to change */
3034 		regs->msr = (regs->msr & ~op.val) | (val & op.val);
3035 		goto instr_done;
3036 
3037 #ifdef CONFIG_PPC64
3038 	case SYSCALL:	/* sc */
3039 		/*
3040 		 * N.B. this uses knowledge about how the syscall
3041 		 * entry code works.  If that is changed, this will
3042 		 * need to be changed also.
3043 		 */
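		/* r0 == 0x1ebe selects the "switch endian" fast path */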
3044 		if (regs->gpr[0] == 0x1ebe &&
3045 		    cpu_has_feature(CPU_FTR_REAL_LE)) {
3046 			regs->msr ^= MSR_LE;
3047 			goto instr_done;
3048 		}
3049 		regs->gpr[9] = regs->gpr[13];
3050 		regs->gpr[10] = MSR_KERNEL;
3051 		regs->gpr[11] = regs->nip + 4;
3052 		regs->gpr[12] = regs->msr & MSR_MASK;
3053 		regs->gpr[13] = (unsigned long) get_paca();
3054 		regs->nip = (unsigned long) &system_call_common;
3055 		regs->msr = MSR_KERNEL;
3056 		return 1;
3057 
3058 	case RFI:
3059 		return -1;
3060 #endif
3061 	}
3062 	return 0;
3063 
3064  instr_done:
3065 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
3066 	return 1;
3067 }
3068 NOKPROBE_SYMBOL(emulate_step);
3069