xref: /openbmc/linux/arch/powerpc/lib/sstep.c (revision 28b8ba8e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Single-step support.
4  *
5  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
6  */
7 #include <linux/kernel.h>
8 #include <linux/kprobes.h>
9 #include <linux/ptrace.h>
10 #include <linux/prefetch.h>
11 #include <asm/sstep.h>
12 #include <asm/processor.h>
13 #include <linux/uaccess.h>
14 #include <asm/cpu_has_feature.h>
15 #include <asm/cputable.h>
16 #include <asm/disassemble.h>
17 
18 #ifdef CONFIG_PPC64
19 /* Bits in SRR1 that are copied from MSR */
20 #define MSR_MASK	0xffffffff87c0ffffUL
21 #else
22 #define MSR_MASK	0x87c0ffff
23 #endif
24 
25 /* Bits in XER */
26 #define XER_SO		0x80000000U
27 #define XER_OV		0x40000000U
28 #define XER_CA		0x20000000U
29 #define XER_OV32	0x00080000U
30 #define XER_CA32	0x00040000U
31 
32 #ifdef CONFIG_VSX
33 #define VSX_REGISTER_XTP(rd)   ((((rd) & 1) << 5) | ((rd) & 0xfe))
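/*
 * Illustration (derived from the macro itself, not upstream text):
 * bit 0 of the 5-bit field becomes bit 5 of the VSR number, so e.g.
 * VSX_REGISTER_XTP(1) == 32 and VSX_REGISTER_XTP(2) == 2, and the
 * result is always an even VSR, as expected for a register pair.
 */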
34 #endif
35 
36 #ifdef CONFIG_PPC_FPU
37 /*
38  * Functions in ldstfp.S
39  */
40 extern void get_fpr(int rn, double *p);
41 extern void put_fpr(int rn, const double *p);
42 extern void get_vr(int rn, __vector128 *p);
43 extern void put_vr(int rn, __vector128 *p);
44 extern void load_vsrn(int vsr, const void *p);
45 extern void store_vsrn(int vsr, void *p);
46 extern void conv_sp_to_dp(const float *sp, double *dp);
47 extern void conv_dp_to_sp(const double *dp, float *sp);
48 #endif
49 
50 #ifdef __powerpc64__
51 /*
52  * Functions in quad.S
53  */
54 extern int do_lq(unsigned long ea, unsigned long *regs);
55 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
56 extern int do_lqarx(unsigned long ea, unsigned long *regs);
57 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
58 		    unsigned int *crp);
59 #endif
60 
61 #ifdef __LITTLE_ENDIAN__
62 #define IS_LE	1
63 #define IS_BE	0
64 #else
65 #define IS_LE	0
66 #define IS_BE	1
67 #endif
68 
69 /*
70  * Emulate the truncation of 64 bit values in 32-bit mode.
71  */
72 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
73 							unsigned long val)
74 {
75 	if ((msr & MSR_64BIT) == 0)
76 		val &= 0xffffffffUL;
77 	return val;
78 }
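/*
 * E.g. with MSR_64BIT clear, truncate_if_32bit(msr, 0x123456789abcdef0UL)
 * returns 0x9abcdef0; in 64-bit mode the value passes through unchanged.
 */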
79 
80 /*
81  * Determine whether a conditional branch instruction would branch.
82  */
83 static nokprobe_inline int branch_taken(unsigned int instr,
84 					const struct pt_regs *regs,
85 					struct instruction_op *op)
86 {
87 	unsigned int bo = (instr >> 21) & 0x1f;
88 	unsigned int bi;
89 
90 	if ((bo & 4) == 0) {
91 		/* decrement counter */
92 		op->type |= DECCTR;
93 		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
94 			return 0;
95 	}
96 	if ((bo & 0x10) == 0) {
97 		/* check bit from CR */
98 		bi = (instr >> 16) & 0x1f;
99 		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
100 			return 0;
101 	}
102 	return 1;
103 }
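/*
 * Worked example: blt (bo = 0b01100, bi = 0).  Bit 2 of bo is set, so
 * CTR is not touched; bit 4 is clear, so the branch is taken only when
 * CR bit 0 (CR0[LT]) equals bit 3 of bo, i.e. when LT is set.
 */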
104 
105 static nokprobe_inline long address_ok(struct pt_regs *regs,
106 				       unsigned long ea, int nb)
107 {
108 	if (!user_mode(regs))
109 		return 1;
110 	if (access_ok((void __user *)ea, nb))
111 		return 1;
112 	if (access_ok((void __user *)ea, 1))
113 		/* Access overlaps the end of the user region */
114 		regs->dar = TASK_SIZE_MAX - 1;
115 	else
116 		regs->dar = ea;
117 	return 0;
118 }
119 
120 /*
121  * Calculate effective address for a D-form instruction
122  */
123 static nokprobe_inline unsigned long dform_ea(unsigned int instr,
124 					      const struct pt_regs *regs)
125 {
126 	int ra;
127 	unsigned long ea;
128 
129 	ra = (instr >> 16) & 0x1f;
130 	ea = (signed short) instr;		/* sign-extend */
131 	if (ra)
132 		ea += regs->gpr[ra];
133 
134 	return ea;
135 }
136 
137 #ifdef __powerpc64__
138 /*
139  * Calculate effective address for a DS-form instruction
140  */
141 static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
142 					       const struct pt_regs *regs)
143 {
144 	int ra;
145 	unsigned long ea;
146 
147 	ra = (instr >> 16) & 0x1f;
148 	ea = (signed short) (instr & ~3);	/* sign-extend */
149 	if (ra)
150 		ea += regs->gpr[ra];
151 
152 	return ea;
153 }
154 
155 /*
156  * Calculate effective address for a DQ-form instruction
157  */
158 static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
159 					       const struct pt_regs *regs)
160 {
161 	int ra;
162 	unsigned long ea;
163 
164 	ra = (instr >> 16) & 0x1f;
165 	ea = (signed short) (instr & ~0xf);	/* sign-extend */
166 	if (ra)
167 		ea += regs->gpr[ra];
168 
169 	return ea;
170 }
171 #endif /* __powerpc64__ */
172 
173 /*
174  * Calculate effective address for an X-form instruction
175  */
176 static nokprobe_inline unsigned long xform_ea(unsigned int instr,
177 					      const struct pt_regs *regs)
178 {
179 	int ra, rb;
180 	unsigned long ea;
181 
182 	ra = (instr >> 16) & 0x1f;
183 	rb = (instr >> 11) & 0x1f;
184 	ea = regs->gpr[rb];
185 	if (ra)
186 		ea += regs->gpr[ra];
187 
188 	return ea;
189 }
190 
191 /*
192  * Calculate effective address for a MLS:D-form / 8LS:D-form
193  * prefixed instruction
194  */
195 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
196 						  unsigned int suffix,
197 						  const struct pt_regs *regs)
198 {
199 	int ra, prefix_r;
200 	unsigned int  dd;
201 	unsigned long ea, d0, d1, d;
202 
203 	prefix_r = GET_PREFIX_R(instr);
204 	ra = GET_PREFIX_RA(suffix);
205 
206 	d0 = instr & 0x3ffff;
207 	d1 = suffix & 0xffff;
208 	d = (d0 << 16) | d1;
209 
210 	/*
211 	 * sign extend a 34 bit number
212 	 */
213 	dd = (unsigned int)(d >> 2);
214 	ea = (signed int)dd;
215 	ea = (ea << 2) | (d & 0x3);
216 
217 	if (!prefix_r && ra)
218 		ea += regs->gpr[ra];
219 	else if (!prefix_r && !ra)
220 		; /* Leave ea as is */
221 	else if (prefix_r)
222 		ea += regs->nip;
223 
224 	/*
225 	 * (prefix_r && ra) is an invalid form. Should already be
226 	 * checked for by caller!
227 	 */
228 
229 	return ea;
230 }
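/*
 * The shift pair above sign-extends the 34-bit displacement, e.g.
 * d0 = 0x3ffff, d1 = 0xffff gives d = 0x3ffffffff (bit 33 set), so
 * ea becomes -1 before the base register or NIP is added in.
 */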
231 
232 /*
233  * Return the largest power of 2, not greater than sizeof(unsigned long),
234  * such that x is a multiple of it.
235  */
236 static nokprobe_inline unsigned long max_align(unsigned long x)
237 {
238 	x |= sizeof(unsigned long);
239 	return x & -x;		/* isolates rightmost bit */
240 }
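/*
 * E.g. on a 64-bit kernel, max_align(0x1006) == 2 and
 * max_align(0x1000) == 8: OR-ing in sizeof(unsigned long) caps the
 * result at the word size before the lowest set bit is isolated.
 */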
241 
242 static nokprobe_inline unsigned long byterev_2(unsigned long x)
243 {
244 	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
245 }
246 
247 static nokprobe_inline unsigned long byterev_4(unsigned long x)
248 {
249 	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
250 		((x & 0xff00) << 8) | ((x & 0xff) << 24);
251 }
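/* E.g. byterev_2(0x1122) == 0x2211, byterev_4(0x11223344) == 0x44332211. */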
252 
253 #ifdef __powerpc64__
254 static nokprobe_inline unsigned long byterev_8(unsigned long x)
255 {
256 	return (byterev_4(x) << 32) | byterev_4(x >> 32);
257 }
258 #endif
259 
260 static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
261 {
262 	switch (nb) {
263 	case 2:
264 		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
265 		break;
266 	case 4:
267 		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
268 		break;
269 #ifdef __powerpc64__
270 	case 8:
271 		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
272 		break;
273 	case 16: {
274 		unsigned long *up = (unsigned long *)ptr;
275 		unsigned long tmp;
276 		tmp = byterev_8(up[0]);
277 		up[0] = byterev_8(up[1]);
278 		up[1] = tmp;
279 		break;
280 	}
281 	case 32: {
282 		unsigned long *up = (unsigned long *)ptr;
283 		unsigned long tmp;
284 
285 		tmp = byterev_8(up[0]);
286 		up[0] = byterev_8(up[3]);
287 		up[3] = tmp;
288 		tmp = byterev_8(up[2]);
289 		up[2] = byterev_8(up[1]);
290 		up[1] = tmp;
291 		break;
292 	}
293 
294 #endif
295 	default:
296 		WARN_ON_ONCE(1);
297 	}
298 }
299 
300 static __always_inline int
301 __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
302 {
303 	unsigned long x = 0;
304 
305 	switch (nb) {
306 	case 1:
307 		unsafe_get_user(x, (unsigned char __user *)ea, Efault);
308 		break;
309 	case 2:
310 		unsafe_get_user(x, (unsigned short __user *)ea, Efault);
311 		break;
312 	case 4:
313 		unsafe_get_user(x, (unsigned int __user *)ea, Efault);
314 		break;
315 #ifdef __powerpc64__
316 	case 8:
317 		unsafe_get_user(x, (unsigned long __user *)ea, Efault);
318 		break;
319 #endif
320 	}
321 	*dest = x;
322 	return 0;
323 
324 Efault:
325 	regs->dar = ea;
326 	return -EFAULT;
327 }
328 
329 static nokprobe_inline int
330 read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
331 {
332 	int err;
333 
334 	if (is_kernel_addr(ea))
335 		return __read_mem_aligned(dest, ea, nb, regs);
336 
337 	if (user_read_access_begin((void __user *)ea, nb)) {
338 		err = __read_mem_aligned(dest, ea, nb, regs);
339 		user_read_access_end();
340 	} else {
341 		err = -EFAULT;
342 		regs->dar = ea;
343 	}
344 
345 	return err;
346 }
347 
348 /*
349  * Copy from userspace to a buffer, using the largest possible
350  * aligned accesses, up to sizeof(long).
351  */
352 static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
353 {
354 	int c;
355 
356 	for (; nb > 0; nb -= c) {
357 		c = max_align(ea);
358 		if (c > nb)
359 			c = max_align(nb);
360 		switch (c) {
361 		case 1:
362 			unsafe_get_user(*dest, (u8 __user *)ea, Efault);
363 			break;
364 		case 2:
365 			unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
366 			break;
367 		case 4:
368 			unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
369 			break;
370 #ifdef __powerpc64__
371 		case 8:
372 			unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
373 			break;
374 #endif
375 		}
376 		dest += c;
377 		ea += c;
378 	}
379 	return 0;
380 
381 Efault:
382 	regs->dar = ea;
383 	return -EFAULT;
384 }
385 
386 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
387 {
388 	int err;
389 
390 	if (is_kernel_addr(ea))
391 		return __copy_mem_in(dest, ea, nb, regs);
392 
393 	if (user_read_access_begin((void __user *)ea, nb)) {
394 		err = __copy_mem_in(dest, ea, nb, regs);
395 		user_read_access_end();
396 	} else {
397 		err = -EFAULT;
398 		regs->dar = ea;
399 	}
400 
401 	return err;
402 }
403 
404 static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
405 					      unsigned long ea, int nb,
406 					      struct pt_regs *regs)
407 {
408 	union {
409 		unsigned long ul;
410 		u8 b[sizeof(unsigned long)];
411 	} u;
412 	int i;
413 	int err;
414 
415 	u.ul = 0;
416 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
417 	err = copy_mem_in(&u.b[i], ea, nb, regs);
418 	if (!err)
419 		*dest = u.ul;
420 	return err;
421 }
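/*
 * The union above right-justifies short reads on big-endian kernels:
 * e.g. on 64-bit BE an nb == 2 read lands in u.b[6..7] of the 8-byte
 * buffer, so u.ul holds the zero-extended halfword on either endianness.
 */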
422 
423 /*
424  * Read memory at address ea for nb bytes, return 0 for success
425  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
426  * If nb < sizeof(long), the result is right-justified on BE systems.
427  */
428 static int read_mem(unsigned long *dest, unsigned long ea, int nb,
429 			      struct pt_regs *regs)
430 {
431 	if (!address_ok(regs, ea, nb))
432 		return -EFAULT;
433 	if ((ea & (nb - 1)) == 0)
434 		return read_mem_aligned(dest, ea, nb, regs);
435 	return read_mem_unaligned(dest, ea, nb, regs);
436 }
437 NOKPROBE_SYMBOL(read_mem);
438 
439 static __always_inline int
440 __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
441 {
442 	switch (nb) {
443 	case 1:
444 		unsafe_put_user(val, (unsigned char __user *)ea, Efault);
445 		break;
446 	case 2:
447 		unsafe_put_user(val, (unsigned short __user *)ea, Efault);
448 		break;
449 	case 4:
450 		unsafe_put_user(val, (unsigned int __user *)ea, Efault);
451 		break;
452 #ifdef __powerpc64__
453 	case 8:
454 		unsafe_put_user(val, (unsigned long __user *)ea, Efault);
455 		break;
456 #endif
457 	}
458 	return 0;
459 
460 Efault:
461 	regs->dar = ea;
462 	return -EFAULT;
463 }
464 
465 static nokprobe_inline int
466 write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
467 {
468 	int err;
469 
470 	if (is_kernel_addr(ea))
471 		return __write_mem_aligned(val, ea, nb, regs);
472 
473 	if (user_write_access_begin((void __user *)ea, nb)) {
474 		err = __write_mem_aligned(val, ea, nb, regs);
475 		user_write_access_end();
476 	} else {
477 		err = -EFAULT;
478 		regs->dar = ea;
479 	}
480 
481 	return err;
482 }
483 
484 /*
485  * Copy from a buffer to userspace, using the largest possible
486  * aligned accesses, up to sizeof(long).
487  */
488 static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
489 {
490 	int c;
491 
492 	for (; nb > 0; nb -= c) {
493 		c = max_align(ea);
494 		if (c > nb)
495 			c = max_align(nb);
496 		switch (c) {
497 		case 1:
498 			unsafe_put_user(*dest, (u8 __user *)ea, Efault);
499 			break;
500 		case 2:
501 			unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
502 			break;
503 		case 4:
504 			unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
505 			break;
506 #ifdef __powerpc64__
507 		case 8:
508 			unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
509 			break;
510 #endif
511 		}
512 		dest += c;
513 		ea += c;
514 	}
515 	return 0;
516 
517 Efault:
518 	regs->dar = ea;
519 	return -EFAULT;
520 }
521 
522 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
523 {
524 	int err;
525 
526 	if (is_kernel_addr(ea))
527 		return __copy_mem_out(dest, ea, nb, regs);
528 
529 	if (user_write_access_begin((void __user *)ea, nb)) {
530 		err = __copy_mem_out(dest, ea, nb, regs);
531 		user_write_access_end();
532 	} else {
533 		err = -EFAULT;
534 		regs->dar = ea;
535 	}
536 
537 	return err;
538 }
539 
540 static nokprobe_inline int write_mem_unaligned(unsigned long val,
541 					       unsigned long ea, int nb,
542 					       struct pt_regs *regs)
543 {
544 	union {
545 		unsigned long ul;
546 		u8 b[sizeof(unsigned long)];
547 	} u;
548 	int i;
549 
550 	u.ul = val;
551 	i = IS_BE ? sizeof(unsigned long) - nb : 0;
552 	return copy_mem_out(&u.b[i], ea, nb, regs);
553 }
554 
555 /*
556  * Write memory at address ea for nb bytes, return 0 for success
557  * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
558  */
559 static int write_mem(unsigned long val, unsigned long ea, int nb,
560 			       struct pt_regs *regs)
561 {
562 	if (!address_ok(regs, ea, nb))
563 		return -EFAULT;
564 	if ((ea & (nb - 1)) == 0)
565 		return write_mem_aligned(val, ea, nb, regs);
566 	return write_mem_unaligned(val, ea, nb, regs);
567 }
568 NOKPROBE_SYMBOL(write_mem);
569 
570 #ifdef CONFIG_PPC_FPU
571 /*
572  * These access either the real FP register or the image in the
573  * thread_struct, depending on regs->msr & MSR_FP.
574  */
575 static int do_fp_load(struct instruction_op *op, unsigned long ea,
576 		      struct pt_regs *regs, bool cross_endian)
577 {
578 	int err, rn, nb;
579 	union {
580 		int i;
581 		unsigned int u;
582 		float f;
583 		double d[2];
584 		unsigned long l[2];
585 		u8 b[2 * sizeof(double)];
586 	} u;
587 
588 	nb = GETSIZE(op->type);
589 	if (nb > sizeof(u))
590 		return -EINVAL;
591 	if (!address_ok(regs, ea, nb))
592 		return -EFAULT;
593 	rn = op->reg;
594 	err = copy_mem_in(u.b, ea, nb, regs);
595 	if (err)
596 		return err;
597 	if (unlikely(cross_endian)) {
598 		do_byte_reverse(u.b, min(nb, 8));
599 		if (nb == 16)
600 			do_byte_reverse(&u.b[8], 8);
601 	}
602 	preempt_disable();
603 	if (nb == 4) {
604 		if (op->type & FPCONV)
605 			conv_sp_to_dp(&u.f, &u.d[0]);
606 		else if (op->type & SIGNEXT)
607 			u.l[0] = u.i;
608 		else
609 			u.l[0] = u.u;
610 	}
611 	if (regs->msr & MSR_FP)
612 		put_fpr(rn, &u.d[0]);
613 	else
614 		current->thread.TS_FPR(rn) = u.l[0];
615 	if (nb == 16) {
616 		/* lfdp */
617 		rn |= 1;
618 		if (regs->msr & MSR_FP)
619 			put_fpr(rn, &u.d[1]);
620 		else
621 			current->thread.TS_FPR(rn) = u.l[1];
622 	}
623 	preempt_enable();
624 	return 0;
625 }
626 NOKPROBE_SYMBOL(do_fp_load);
627 
628 static int do_fp_store(struct instruction_op *op, unsigned long ea,
629 		       struct pt_regs *regs, bool cross_endian)
630 {
631 	int rn, nb;
632 	union {
633 		unsigned int u;
634 		float f;
635 		double d[2];
636 		unsigned long l[2];
637 		u8 b[2 * sizeof(double)];
638 	} u;
639 
640 	nb = GETSIZE(op->type);
641 	if (nb > sizeof(u))
642 		return -EINVAL;
643 	if (!address_ok(regs, ea, nb))
644 		return -EFAULT;
645 	rn = op->reg;
646 	preempt_disable();
647 	if (regs->msr & MSR_FP)
648 		get_fpr(rn, &u.d[0]);
649 	else
650 		u.l[0] = current->thread.TS_FPR(rn);
651 	if (nb == 4) {
652 		if (op->type & FPCONV)
653 			conv_dp_to_sp(&u.d[0], &u.f);
654 		else
655 			u.u = u.l[0];
656 	}
657 	if (nb == 16) {
658 		rn |= 1;
659 		if (regs->msr & MSR_FP)
660 			get_fpr(rn, &u.d[1]);
661 		else
662 			u.l[1] = current->thread.TS_FPR(rn);
663 	}
664 	preempt_enable();
665 	if (unlikely(cross_endian)) {
666 		do_byte_reverse(u.b, min(nb, 8));
667 		if (nb == 16)
668 			do_byte_reverse(&u.b[8], 8);
669 	}
670 	return copy_mem_out(u.b, ea, nb, regs);
671 }
672 NOKPROBE_SYMBOL(do_fp_store);
673 #endif
674 
675 #ifdef CONFIG_ALTIVEC
676 /* For Altivec/VMX, no need to worry about alignment */
677 static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
678 				       int size, struct pt_regs *regs,
679 				       bool cross_endian)
680 {
681 	int err;
682 	union {
683 		__vector128 v;
684 		u8 b[sizeof(__vector128)];
685 	} u = {};
686 
687 	if (size > sizeof(u))
688 		return -EINVAL;
689 
690 	if (!address_ok(regs, ea & ~0xfUL, 16))
691 		return -EFAULT;
692 	/* align to multiple of size */
693 	ea &= ~(size - 1);
694 	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
695 	if (err)
696 		return err;
697 	if (unlikely(cross_endian))
698 		do_byte_reverse(&u.b[ea & 0xf], size);
699 	preempt_disable();
700 	if (regs->msr & MSR_VEC)
701 		put_vr(rn, &u.v);
702 	else
703 		current->thread.vr_state.vr[rn] = u.v;
704 	preempt_enable();
705 	return 0;
706 }
707 
708 static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
709 					int size, struct pt_regs *regs,
710 					bool cross_endian)
711 {
712 	union {
713 		__vector128 v;
714 		u8 b[sizeof(__vector128)];
715 	} u;
716 
717 	if (size > sizeof(u))
718 		return -EINVAL;
719 
720 	if (!address_ok(regs, ea & ~0xfUL, 16))
721 		return -EFAULT;
722 	/* align to multiple of size */
723 	ea &= ~(size - 1);
724 
725 	preempt_disable();
726 	if (regs->msr & MSR_VEC)
727 		get_vr(rn, &u.v);
728 	else
729 		u.v = current->thread.vr_state.vr[rn];
730 	preempt_enable();
731 	if (unlikely(cross_endian))
732 		do_byte_reverse(&u.b[ea & 0xf], size);
733 	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
734 }
735 #endif /* CONFIG_ALTIVEC */
736 
737 #ifdef __powerpc64__
738 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
739 				      int reg, bool cross_endian)
740 {
741 	int err;
742 
743 	if (!address_ok(regs, ea, 16))
744 		return -EFAULT;
745 	/* if aligned, should be atomic */
746 	if ((ea & 0xf) == 0) {
747 		err = do_lq(ea, &regs->gpr[reg]);
748 	} else {
749 		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
750 		if (!err)
751 			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
752 	}
753 	if (!err && unlikely(cross_endian))
754 		do_byte_reverse(&regs->gpr[reg], 16);
755 	return err;
756 }
757 
758 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
759 				       int reg, bool cross_endian)
760 {
761 	int err;
762 	unsigned long vals[2];
763 
764 	if (!address_ok(regs, ea, 16))
765 		return -EFAULT;
766 	vals[0] = regs->gpr[reg];
767 	vals[1] = regs->gpr[reg + 1];
768 	if (unlikely(cross_endian))
769 		do_byte_reverse(vals, 16);
770 
771 	/* if aligned, should be atomic */
772 	if ((ea & 0xf) == 0)
773 		return do_stq(ea, vals[0], vals[1]);
774 
775 	err = write_mem(vals[IS_LE], ea, 8, regs);
776 	if (!err)
777 		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
778 	return err;
779 }
780 #endif /* __powerpc64__ */
781 
782 #ifdef CONFIG_VSX
783 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
784 		      const void *mem, bool rev)
785 {
786 	int size, read_size;
787 	int i, j;
788 	const unsigned int *wp;
789 	const unsigned short *hp;
790 	const unsigned char *bp;
791 
792 	size = GETSIZE(op->type);
793 	reg->d[0] = reg->d[1] = 0;
794 
795 	switch (op->element_size) {
796 	case 32:
797 		/* [p]lxvp[x] */
798 	case 16:
799 		/* whole vector; lxv[x] or lxvl[l] */
800 		if (size == 0)
801 			break;
802 		memcpy(reg, mem, size);
803 		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
804 			rev = !rev;
805 		if (rev)
806 			do_byte_reverse(reg, size);
807 		break;
808 	case 8:
809 		/* scalar loads, lxvd2x, lxvdsx */
810 		read_size = (size >= 8) ? 8 : size;
811 		i = IS_LE ? 8 : 8 - read_size;
812 		memcpy(&reg->b[i], mem, read_size);
813 		if (rev)
814 			do_byte_reverse(&reg->b[i], 8);
815 		if (size < 8) {
816 			if (op->type & SIGNEXT) {
817 				/* size == 4 is the only case here */
818 				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
819 			} else if (op->vsx_flags & VSX_FPCONV) {
820 				preempt_disable();
821 				conv_sp_to_dp(&reg->fp[1 + IS_LE],
822 					      &reg->dp[IS_LE]);
823 				preempt_enable();
824 			}
825 		} else {
826 			if (size == 16) {
827 				unsigned long v = *(unsigned long *)(mem + 8);
828 				reg->d[IS_BE] = !rev ? v : byterev_8(v);
829 			} else if (op->vsx_flags & VSX_SPLAT)
830 				reg->d[IS_BE] = reg->d[IS_LE];
831 		}
832 		break;
833 	case 4:
834 		/* lxvw4x, lxvwsx */
835 		wp = mem;
836 		for (j = 0; j < size / 4; ++j) {
837 			i = IS_LE ? 3 - j : j;
838 			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
839 		}
840 		if (op->vsx_flags & VSX_SPLAT) {
841 			u32 val = reg->w[IS_LE ? 3 : 0];
842 			for (; j < 4; ++j) {
843 				i = IS_LE ? 3 - j : j;
844 				reg->w[i] = val;
845 			}
846 		}
847 		break;
848 	case 2:
849 		/* lxvh8x */
850 		hp = mem;
851 		for (j = 0; j < size / 2; ++j) {
852 			i = IS_LE ? 7 - j : j;
853 			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
854 		}
855 		break;
856 	case 1:
857 		/* lxvb16x */
858 		bp = mem;
859 		for (j = 0; j < size; ++j) {
860 			i = IS_LE ? 15 - j : j;
861 			reg->b[i] = *bp++;
862 		}
863 		break;
864 	}
865 }
866 EXPORT_SYMBOL_GPL(emulate_vsx_load);
867 NOKPROBE_SYMBOL(emulate_vsx_load);
868 
869 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
870 		       void *mem, bool rev)
871 {
872 	int size, write_size;
873 	int i, j;
874 	union vsx_reg buf;
875 	unsigned int *wp;
876 	unsigned short *hp;
877 	unsigned char *bp;
878 
879 	size = GETSIZE(op->type);
880 
881 	switch (op->element_size) {
882 	case 32:
883 		/* [p]stxvp[x] */
884 		if (size == 0)
885 			break;
886 		if (rev) {
887 			/* reverse 32 bytes */
888 			union vsx_reg buf32[2];
889 			buf32[0].d[0] = byterev_8(reg[1].d[1]);
890 			buf32[0].d[1] = byterev_8(reg[1].d[0]);
891 			buf32[1].d[0] = byterev_8(reg[0].d[1]);
892 			buf32[1].d[1] = byterev_8(reg[0].d[0]);
893 			memcpy(mem, buf32, size);
894 		} else {
895 			memcpy(mem, reg, size);
896 		}
897 		break;
898 	case 16:
899 		/* stxv, stxvx, stxvl, stxvll */
900 		if (size == 0)
901 			break;
902 		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
903 			rev = !rev;
904 		if (rev) {
905 			/* reverse 16 bytes */
906 			buf.d[0] = byterev_8(reg->d[1]);
907 			buf.d[1] = byterev_8(reg->d[0]);
908 			reg = &buf;
909 		}
910 		memcpy(mem, reg, size);
911 		break;
912 	case 8:
913 		/* scalar stores, stxvd2x */
914 		write_size = (size >= 8) ? 8 : size;
915 		i = IS_LE ? 8 : 8 - write_size;
916 		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
917 			buf.d[0] = buf.d[1] = 0;
918 			preempt_disable();
919 			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
920 			preempt_enable();
921 			reg = &buf;
922 		}
923 		memcpy(mem, &reg->b[i], write_size);
924 		if (size == 16)
925 			memcpy(mem + 8, &reg->d[IS_BE], 8);
926 		if (unlikely(rev)) {
927 			do_byte_reverse(mem, write_size);
928 			if (size == 16)
929 				do_byte_reverse(mem + 8, 8);
930 		}
931 		break;
932 	case 4:
933 		/* stxvw4x */
934 		wp = mem;
935 		for (j = 0; j < size / 4; ++j) {
936 			i = IS_LE ? 3 - j : j;
937 			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
938 		}
939 		break;
940 	case 2:
941 		/* stxvh8x */
942 		hp = mem;
943 		for (j = 0; j < size / 2; ++j) {
944 			i = IS_LE ? 7 - j : j;
945 			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
946 		}
947 		break;
948 	case 1:
949 		/* stxvb16x */
950 		bp = mem;
951 		for (j = 0; j < size; ++j) {
952 			i = IS_LE ? 15 - j : j;
953 			*bp++ = reg->b[i];
954 		}
955 		break;
956 	}
957 }
958 EXPORT_SYMBOL_GPL(emulate_vsx_store);
959 NOKPROBE_SYMBOL(emulate_vsx_store);
960 
961 static nokprobe_inline int do_vsx_load(struct instruction_op *op,
962 				       unsigned long ea, struct pt_regs *regs,
963 				       bool cross_endian)
964 {
965 	int reg = op->reg;
966 	int i, j, nr_vsx_regs;
967 	u8 mem[32];
968 	union vsx_reg buf[2];
969 	int size = GETSIZE(op->type);
970 
971 	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
972 		return -EFAULT;
973 
974 	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
975 	emulate_vsx_load(op, buf, mem, cross_endian);
976 	preempt_disable();
977 	if (reg < 32) {
978 		/* FP regs + extensions */
979 		if (regs->msr & MSR_FP) {
980 			for (i = 0; i < nr_vsx_regs; i++) {
981 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
982 				load_vsrn(reg + i, &buf[j].v);
983 			}
984 		} else {
985 			for (i = 0; i < nr_vsx_regs; i++) {
986 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
987 				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
988 				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
989 			}
990 		}
991 	} else {
992 		if (regs->msr & MSR_VEC) {
993 			for (i = 0; i < nr_vsx_regs; i++) {
994 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
995 				load_vsrn(reg + i, &buf[j].v);
996 			}
997 		} else {
998 			for (i = 0; i < nr_vsx_regs; i++) {
999 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1000 				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
1001 			}
1002 		}
1003 	}
1004 	preempt_enable();
1005 	return 0;
1006 }
1007 
1008 static nokprobe_inline int do_vsx_store(struct instruction_op *op,
1009 					unsigned long ea, struct pt_regs *regs,
1010 					bool cross_endian)
1011 {
1012 	int reg = op->reg;
1013 	int i, j, nr_vsx_regs;
1014 	u8 mem[32];
1015 	union vsx_reg buf[2];
1016 	int size = GETSIZE(op->type);
1017 
1018 	if (!address_ok(regs, ea, size))
1019 		return -EFAULT;
1020 
1021 	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
1022 	preempt_disable();
1023 	if (reg < 32) {
1024 		/* FP regs + extensions */
1025 		if (regs->msr & MSR_FP) {
1026 			for (i = 0; i < nr_vsx_regs; i++) {
1027 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1028 				store_vsrn(reg + i, &buf[j].v);
1029 			}
1030 		} else {
1031 			for (i = 0; i < nr_vsx_regs; i++) {
1032 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1033 				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
1034 				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
1035 			}
1036 		}
1037 	} else {
1038 		if (regs->msr & MSR_VEC) {
1039 			for (i = 0; i < nr_vsx_regs; i++) {
1040 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1041 				store_vsrn(reg + i, &buf[j].v);
1042 			}
1043 		} else {
1044 			for (i = 0; i < nr_vsx_regs; i++) {
1045 				j = IS_LE ? nr_vsx_regs - i - 1 : i;
1046 				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
1047 			}
1048 		}
1049 	}
1050 	preempt_enable();
1051 	emulate_vsx_store(op, buf, mem, cross_endian);
1052 	return  copy_mem_out(mem, ea, size, regs);
1053 }
1054 #endif /* CONFIG_VSX */
1055 
1056 static __always_inline int __emulate_dcbz(unsigned long ea)
1057 {
1058 	unsigned long i;
1059 	unsigned long size = l1_dcache_bytes();
1060 
1061 	for (i = 0; i < size; i += sizeof(long))
1062 		unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
1063 
1064 	return 0;
1065 
1066 Efault:
1067 	return -EFAULT;
1068 }
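/*
 * E.g. with a 128-byte L1 cache line and 8-byte longs this issues 16
 * zeroing stores; the caller has already aligned ea to the line size.
 */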
1069 
1070 int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
1071 {
1072 	int err;
1073 	unsigned long size = l1_dcache_bytes();
1074 
1075 	ea = truncate_if_32bit(regs->msr, ea);
1076 	ea &= ~(size - 1);
1077 	if (!address_ok(regs, ea, size))
1078 		return -EFAULT;
1079 
1080 	if (is_kernel_addr(ea)) {
1081 		err = __emulate_dcbz(ea);
1082 	} else if (user_write_access_begin((void __user *)ea, size)) {
1083 		err = __emulate_dcbz(ea);
1084 		user_write_access_end();
1085 	} else {
1086 		err = -EFAULT;
1087 	}
1088 
1089 	if (err)
1090 		regs->dar = ea;
1091 
1092 
1093 	return err;
1094 }
1095 NOKPROBE_SYMBOL(emulate_dcbz);
1096 
1097 #define __put_user_asmx(x, addr, err, op, cr)		\
1098 	__asm__ __volatile__(				\
1099 		".machine push\n"			\
1100 		".machine power8\n"			\
1101 		"1:	" op " %2,0,%3\n"		\
1102 		".machine pop\n"			\
1103 		"	mfcr	%1\n"			\
1104 		"2:\n"					\
1105 		".section .fixup,\"ax\"\n"		\
1106 		"3:	li	%0,%4\n"		\
1107 		"	b	2b\n"			\
1108 		".previous\n"				\
1109 		EX_TABLE(1b, 3b)			\
1110 		: "=r" (err), "=r" (cr)			\
1111 		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
1112 
1113 #define __get_user_asmx(x, addr, err, op)		\
1114 	__asm__ __volatile__(				\
1115 		".machine push\n"			\
1116 		".machine power8\n"			\
1117 		"1:	"op" %1,0,%2\n"			\
1118 		".machine pop\n"			\
1119 		"2:\n"					\
1120 		".section .fixup,\"ax\"\n"		\
1121 		"3:	li	%0,%3\n"		\
1122 		"	b	2b\n"			\
1123 		".previous\n"				\
1124 		EX_TABLE(1b, 3b)			\
1125 		: "=r" (err), "=r" (x)			\
1126 		: "r" (addr), "i" (-EFAULT), "0" (err))
1127 
1128 #define __cacheop_user_asmx(addr, err, op)		\
1129 	__asm__ __volatile__(				\
1130 		"1:	"op" 0,%1\n"			\
1131 		"2:\n"					\
1132 		".section .fixup,\"ax\"\n"		\
1133 		"3:	li	%0,%3\n"		\
1134 		"	b	2b\n"			\
1135 		".previous\n"				\
1136 		EX_TABLE(1b, 3b)			\
1137 		: "=r" (err)				\
1138 		: "r" (addr), "i" (-EFAULT), "0" (err))
1139 
1140 static nokprobe_inline void set_cr0(const struct pt_regs *regs,
1141 				    struct instruction_op *op)
1142 {
1143 	long val = op->val;
1144 
1145 	op->type |= SETCC;
1146 	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
1147 	if (!(regs->msr & MSR_64BIT))
1148 		val = (int) val;
1149 	if (val < 0)
1150 		op->ccval |= 0x80000000;
1151 	else if (val > 0)
1152 		op->ccval |= 0x40000000;
1153 	else
1154 		op->ccval |= 0x20000000;
1155 }
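/*
 * Example: a zero result with XER[SO] clear yields CR0 = 0b0010 (EQ);
 * a negative result gives 0b1000 (LT), a positive one 0b0100 (GT), and
 * the low bit of the field mirrors XER[SO] via the (regs->xer >> 3)
 * term above.
 */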
1156 
1157 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
1158 {
1159 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1160 		if (val)
1161 			op->xerval |= XER_CA32;
1162 		else
1163 			op->xerval &= ~XER_CA32;
1164 	}
1165 }
1166 
1167 static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
1168 				     struct instruction_op *op, int rd,
1169 				     unsigned long val1, unsigned long val2,
1170 				     unsigned long carry_in)
1171 {
1172 	unsigned long val = val1 + val2;
1173 
1174 	if (carry_in)
1175 		++val;
1176 	op->type = COMPUTE | SETREG | SETXER;
1177 	op->reg = rd;
1178 	op->val = val;
1179 	val = truncate_if_32bit(regs->msr, val);
1180 	val1 = truncate_if_32bit(regs->msr, val1);
1181 	op->xerval = regs->xer;
1182 	if (val < val1 || (carry_in && val == val1))
1183 		op->xerval |= XER_CA;
1184 	else
1185 		op->xerval &= ~XER_CA;
1186 
1187 	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
1188 			(carry_in && (unsigned int)val == (unsigned int)val1));
1189 }
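/*
 * The carry-out test relies on wrap-around: the truncated sum is below
 * val1 exactly when the addition overflowed, e.g. ~0UL + 1 gives
 * val == 0 < val1, setting XER[CA]; set_ca32() repeats the comparison
 * on the low 32 bits for XER[CA32].
 */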
1190 
1191 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
1192 					  struct instruction_op *op,
1193 					  long v1, long v2, int crfld)
1194 {
1195 	unsigned int crval, shift;
1196 
1197 	op->type = COMPUTE | SETCC;
1198 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1199 	if (v1 < v2)
1200 		crval |= 8;
1201 	else if (v1 > v2)
1202 		crval |= 4;
1203 	else
1204 		crval |= 2;
1205 	shift = (7 - crfld) * 4;
1206 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1207 }
1208 
1209 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
1210 					    struct instruction_op *op,
1211 					    unsigned long v1,
1212 					    unsigned long v2, int crfld)
1213 {
1214 	unsigned int crval, shift;
1215 
1216 	op->type = COMPUTE | SETCC;
1217 	crval = (regs->xer >> 31) & 1;		/* get SO bit */
1218 	if (v1 < v2)
1219 		crval |= 8;
1220 	else if (v1 > v2)
1221 		crval |= 4;
1222 	else
1223 		crval |= 2;
1224 	shift = (7 - crfld) * 4;
1225 	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
1226 }
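/*
 * For both compare flavours, crfld picks the CR field to update: the
 * (7 - crfld) * 4 shift places the 4-bit LT/GT/EQ/SO value, so e.g.
 * crfld == 0 rewrites the top nibble (CR0) of regs->ccr.
 */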
1227 
1228 static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
1229 				    struct instruction_op *op,
1230 				    unsigned long v1, unsigned long v2)
1231 {
1232 	unsigned long long out_val, mask;
1233 	int i;
1234 
1235 	out_val = 0;
1236 	for (i = 0; i < 8; i++) {
1237 		mask = 0xffUL << (i * 8);
1238 		if ((v1 & mask) == (v2 & mask))
1239 			out_val |= mask;
1240 	}
1241 	op->val = out_val;
1242 }
1243 
1244 /*
1245  * The size parameter is used to adjust the equivalent popcnt instruction.
1246  * popcntb = 8, popcntw = 32, popcntd = 64
1247  */
1248 static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
1249 				      struct instruction_op *op,
1250 				      unsigned long v1, int size)
1251 {
1252 	unsigned long long out = v1;
1253 
1254 	out -= (out >> 1) & 0x5555555555555555ULL;
1255 	out = (0x3333333333333333ULL & out) +
1256 	      (0x3333333333333333ULL & (out >> 2));
1257 	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1258 
1259 	if (size == 8) {	/* popcntb */
1260 		op->val = out;
1261 		return;
1262 	}
1263 	out += out >> 8;
1264 	out += out >> 16;
1265 	if (size == 32) {	/* popcntw */
1266 		op->val = out & 0x0000003f0000003fULL;
1267 		return;
1268 	}
1269 
1270 	out = (out + (out >> 32)) & 0x7f;
1271 	op->val = out;	/* popcntd */
1272 }
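/*
 * This is the classic SWAR reduction: 2-bit, 4-bit and then byte-wide
 * partial sums computed in parallel.  Worked popcntb example:
 * v1 = 0x0307 gives op->val = 0x0203, i.e. byte 0x07 has 3 bits set
 * and byte 0x03 has 2.
 */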
1273 
1274 #ifdef CONFIG_PPC64
1275 static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
1276 				      struct instruction_op *op,
1277 				      unsigned long v1, unsigned long v2)
1278 {
1279 	unsigned char perm, idx;
1280 	unsigned int i;
1281 
1282 	perm = 0;
1283 	for (i = 0; i < 8; i++) {
1284 		idx = (v1 >> (i * 8)) & 0xff;
1285 		if (idx < 64)
1286 			if (v2 & PPC_BIT(idx))
1287 				perm |= 1 << i;
1288 	}
1289 	op->val = perm;
1290 }
1291 #endif /* CONFIG_PPC64 */
1292 /*
1293  * The size parameter adjusts the equivalent prty instruction.
1294  * prtyw = 32, prtyd = 64
1295  */
1296 static nokprobe_inline void do_prty(const struct pt_regs *regs,
1297 				    struct instruction_op *op,
1298 				    unsigned long v, int size)
1299 {
1300 	unsigned long long res = v ^ (v >> 8);
1301 
1302 	res ^= res >> 16;
1303 	if (size == 32) {		/* prtyw */
1304 		op->val = res & 0x0000000100000001ULL;
1305 		return;
1306 	}
1307 
1308 	res ^= res >> 32;
1309 	op->val = res & 1;	/* prtyd */
1310 }
1311 
1312 static nokprobe_inline int trap_compare(long v1, long v2)
1313 {
1314 	int ret = 0;
1315 
1316 	if (v1 < v2)
1317 		ret |= 0x10;
1318 	else if (v1 > v2)
1319 		ret |= 0x08;
1320 	else
1321 		ret |= 0x04;
1322 	if ((unsigned long)v1 < (unsigned long)v2)
1323 		ret |= 0x02;
1324 	else if ((unsigned long)v1 > (unsigned long)v2)
1325 		ret |= 0x01;
1326 	return ret;
1327 }
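/*
 * The result mirrors the TO field of tw/td, e.g. trap_compare(1, 2)
 * == 0x12 (signed-less | unsigned-less), so "tw 16,ra,rb" (trap if
 * less than, TO = 0x10) traps on that operand pair.
 */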
1328 
1329 /*
1330  * Elements of 32-bit rotate and mask instructions.
1331  */
1332 #define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
1333 			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
1334 #ifdef __powerpc64__
1335 #define MASK64_L(mb)	(~0UL >> (mb))
1336 #define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
1337 #define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
1338 #define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
1339 #else
1340 #define DATA32(x)	(x)
1341 #endif
1342 #define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
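/*
 * MASK32(mb, me) builds an IBM-numbered mask of 1s from bit mb through
 * bit me, e.g. MASK32(27, 31) == 0x1f and MASK32(0, 31) == 0xffffffff.
 * On 64-bit, DATA32() doubles the low word so that the 64-bit ROTATE()
 * yields the 32-bit rotate result in each half, e.g.
 * ROTATE(DATA32(0x12345678), 8) has 0x34567812 in its low word.
 */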
1343 
1344 /*
1345  * Decode an instruction, and return information about it in *op
1346  * without changing *regs.
1347  * Integer arithmetic and logical instructions, branches, and barrier
1348  * instructions can be emulated just using the information in *op.
1349  *
1350  * Return value is 1 if the instruction can be emulated just by
1351  * updating *regs with the information in *op, -1 if we need the
1352  * GPRs but *regs doesn't contain the full register set, or 0
1353  * otherwise.
1354  */
1355 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
1356 		  ppc_inst_t instr)
1357 {
1358 #ifdef CONFIG_PPC64
1359 	unsigned int suffixopcode, prefixtype, prefix_r;
1360 #endif
1361 	unsigned int opcode, ra, rb, rc, rd, spr, u;
1362 	unsigned long int imm;
1363 	unsigned long int val, val2;
1364 	unsigned int mb, me, sh;
1365 	unsigned int word, suffix;
1366 	long ival;
1367 
1368 	word = ppc_inst_val(instr);
1369 	suffix = ppc_inst_suffix(instr);
1370 
1371 	op->type = COMPUTE;
1372 
1373 	opcode = ppc_inst_primary_opcode(instr);
1374 	switch (opcode) {
1375 	case 16:	/* bc */
1376 		op->type = BRANCH;
1377 		imm = (signed short)(word & 0xfffc);
1378 		if ((word & 2) == 0)
1379 			imm += regs->nip;
1380 		op->val = truncate_if_32bit(regs->msr, imm);
1381 		if (word & 1)
1382 			op->type |= SETLK;
1383 		if (branch_taken(word, regs, op))
1384 			op->type |= BRTAKEN;
1385 		return 1;
1386 	case 17:	/* sc */
1387 		if ((word & 0xfe2) == 2)
1388 			op->type = SYSCALL;
1389 		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
1390 				(word & 0xfe3) == 1) {	/* scv */
1391 			op->type = SYSCALL_VECTORED_0;
1392 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1393 				goto unknown_opcode;
1394 		} else
1395 			op->type = UNKNOWN;
1396 		return 0;
1397 	case 18:	/* b */
1398 		op->type = BRANCH | BRTAKEN;
1399 		imm = word & 0x03fffffc;
1400 		if (imm & 0x02000000)
1401 			imm -= 0x04000000;
1402 		if ((word & 2) == 0)
1403 			imm += regs->nip;
1404 		op->val = truncate_if_32bit(regs->msr, imm);
1405 		if (word & 1)
1406 			op->type |= SETLK;
1407 		return 1;
1408 	case 19:
1409 		switch ((word >> 1) & 0x3ff) {
1410 		case 0:		/* mcrf */
1411 			op->type = COMPUTE + SETCC;
1412 			rd = 7 - ((word >> 23) & 0x7);
1413 			ra = 7 - ((word >> 18) & 0x7);
1414 			rd *= 4;
1415 			ra *= 4;
1416 			val = (regs->ccr >> ra) & 0xf;
1417 			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
1418 			return 1;
1419 
1420 		case 16:	/* bclr */
1421 		case 528:	/* bcctr */
1422 			op->type = BRANCH;
1423 			imm = (word & 0x400)? regs->ctr: regs->link;
1424 			op->val = truncate_if_32bit(regs->msr, imm);
1425 			if (word & 1)
1426 				op->type |= SETLK;
1427 			if (branch_taken(word, regs, op))
1428 				op->type |= BRTAKEN;
1429 			return 1;
1430 
1431 		case 18:	/* rfid, scary */
1432 			if (regs->msr & MSR_PR)
1433 				goto priv;
1434 			op->type = RFI;
1435 			return 0;
1436 
1437 		case 150:	/* isync */
1438 			op->type = BARRIER | BARRIER_ISYNC;
1439 			return 1;
1440 
1441 		case 33:	/* crnor */
1442 		case 129:	/* crandc */
1443 		case 193:	/* crxor */
1444 		case 225:	/* crnand */
1445 		case 257:	/* crand */
1446 		case 289:	/* creqv */
1447 		case 417:	/* crorc */
1448 		case 449:	/* cror */
1449 			op->type = COMPUTE + SETCC;
1450 			ra = (word >> 16) & 0x1f;
1451 			rb = (word >> 11) & 0x1f;
1452 			rd = (word >> 21) & 0x1f;
1453 			ra = (regs->ccr >> (31 - ra)) & 1;
1454 			rb = (regs->ccr >> (31 - rb)) & 1;
1455 			val = (word >> (6 + ra * 2 + rb)) & 1;
1456 			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
1457 				(val << (31 - rd));
1458 			return 1;
1459 		}
1460 		break;
1461 	case 31:
1462 		switch ((word >> 1) & 0x3ff) {
1463 		case 598:	/* sync */
1464 			op->type = BARRIER + BARRIER_SYNC;
1465 #ifdef __powerpc64__
1466 			switch ((word >> 21) & 3) {
1467 			case 1:		/* lwsync */
1468 				op->type = BARRIER + BARRIER_LWSYNC;
1469 				break;
1470 			case 2:		/* ptesync */
1471 				op->type = BARRIER + BARRIER_PTESYNC;
1472 				break;
1473 			}
1474 #endif
1475 			return 1;
1476 
1477 		case 854:	/* eieio */
1478 			op->type = BARRIER + BARRIER_EIEIO;
1479 			return 1;
1480 		}
1481 		break;
1482 	}
1483 
1484 	rd = (word >> 21) & 0x1f;
1485 	ra = (word >> 16) & 0x1f;
1486 	rb = (word >> 11) & 0x1f;
1487 	rc = (word >> 6) & 0x1f;
1488 
1489 	switch (opcode) {
1490 #ifdef __powerpc64__
1491 	case 1:
1492 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
1493 			goto unknown_opcode;
1494 
1495 		prefix_r = GET_PREFIX_R(word);
1496 		ra = GET_PREFIX_RA(suffix);
1497 		rd = (suffix >> 21) & 0x1f;
1498 		op->reg = rd;
1499 		op->val = regs->gpr[rd];
1500 		suffixopcode = get_op(suffix);
1501 		prefixtype = (word >> 24) & 0x3;
1502 		switch (prefixtype) {
1503 		case 2:
1504 			if (prefix_r && ra)
1505 				return 0;
1506 			switch (suffixopcode) {
1507 			case 14:	/* paddi */
1508 				op->type = COMPUTE | PREFIXED;
1509 				op->val = mlsd_8lsd_ea(word, suffix, regs);
1510 				goto compute_done;
1511 			}
1512 		}
1513 		break;
1514 	case 2:		/* tdi */
1515 		if (rd & trap_compare(regs->gpr[ra], (short) word))
1516 			goto trap;
1517 		return 1;
1518 #endif
1519 	case 3:		/* twi */
1520 		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
1521 			goto trap;
1522 		return 1;
1523 
1524 #ifdef __powerpc64__
1525 	case 4:
1526 		/*
1527 		 * There are very many instructions with this primary opcode
1528 		 * introduced in the ISA as early as v2.03. However, the ones
1529 		 * we currently emulate were all introduced with ISA 3.0
1530 		 */
1531 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
1532 			goto unknown_opcode;
1533 
1534 		switch (word & 0x3f) {
1535 		case 48:	/* maddhd */
1536 			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
1537 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1538 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1539 			goto compute_done;
1540 
1541 		case 49:	/* maddhdu */
1542 			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
1543 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1544 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1545 			goto compute_done;
1546 
1547 		case 51:	/* maddld */
1548 			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
1549 				     "=r" (op->val) : "r" (regs->gpr[ra]),
1550 				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
1551 			goto compute_done;
1552 		}
1553 
1554 		/*
1555 		 * There are other instructions from ISA 3.0 with the same
1556 		 * primary opcode which do not have emulation support yet.
1557 		 */
1558 		goto unknown_opcode;
1559 #endif
1560 
1561 	case 7:		/* mulli */
1562 		op->val = regs->gpr[ra] * (short) word;
1563 		goto compute_done;
1564 
1565 	case 8:		/* subfic */
1566 		imm = (short) word;
1567 		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
1568 		return 1;
1569 
1570 	case 10:	/* cmpli */
1571 		imm = (unsigned short) word;
1572 		val = regs->gpr[ra];
1573 #ifdef __powerpc64__
1574 		if ((rd & 1) == 0)
1575 			val = (unsigned int) val;
1576 #endif
1577 		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
1578 		return 1;
1579 
1580 	case 11:	/* cmpi */
1581 		imm = (short) word;
1582 		val = regs->gpr[ra];
1583 #ifdef __powerpc64__
1584 		if ((rd & 1) == 0)
1585 			val = (int) val;
1586 #endif
1587 		do_cmp_signed(regs, op, val, imm, rd >> 2);
1588 		return 1;
1589 
1590 	case 12:	/* addic */
1591 		imm = (short) word;
1592 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1593 		return 1;
1594 
1595 	case 13:	/* addic. */
1596 		imm = (short) word;
1597 		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
1598 		set_cr0(regs, op);
1599 		return 1;
1600 
1601 	case 14:	/* addi */
1602 		imm = (short) word;
1603 		if (ra)
1604 			imm += regs->gpr[ra];
1605 		op->val = imm;
1606 		goto compute_done;
1607 
1608 	case 15:	/* addis */
1609 		imm = ((short) word) << 16;
1610 		if (ra)
1611 			imm += regs->gpr[ra];
1612 		op->val = imm;
1613 		goto compute_done;
1614 
1615 	case 19:
1616 		if (((word >> 1) & 0x1f) == 2) {
1617 			/* addpcis */
1618 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1619 				goto unknown_opcode;
1620 			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
1621 			imm |= (word >> 15) & 0x3e;	/* d1 field */
1622 			op->val = regs->nip + (imm << 16) + 4;
1623 			goto compute_done;
1624 		}
1625 		op->type = UNKNOWN;
1626 		return 0;
1627 
1628 	case 20:	/* rlwimi */
1629 		mb = (word >> 6) & 0x1f;
1630 		me = (word >> 1) & 0x1f;
1631 		val = DATA32(regs->gpr[rd]);
1632 		imm = MASK32(mb, me);
1633 		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
1634 		goto logical_done;
1635 
1636 	case 21:	/* rlwinm */
1637 		mb = (word >> 6) & 0x1f;
1638 		me = (word >> 1) & 0x1f;
1639 		val = DATA32(regs->gpr[rd]);
1640 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1641 		goto logical_done;
1642 
1643 	case 23:	/* rlwnm */
1644 		mb = (word >> 6) & 0x1f;
1645 		me = (word >> 1) & 0x1f;
1646 		rb = regs->gpr[rb] & 0x1f;
1647 		val = DATA32(regs->gpr[rd]);
1648 		op->val = ROTATE(val, rb) & MASK32(mb, me);
1649 		goto logical_done;
1650 
1651 	case 24:	/* ori */
1652 		op->val = regs->gpr[rd] | (unsigned short) word;
1653 		goto logical_done_nocc;
1654 
1655 	case 25:	/* oris */
1656 		imm = (unsigned short) word;
1657 		op->val = regs->gpr[rd] | (imm << 16);
1658 		goto logical_done_nocc;
1659 
1660 	case 26:	/* xori */
1661 		op->val = regs->gpr[rd] ^ (unsigned short) word;
1662 		goto logical_done_nocc;
1663 
1664 	case 27:	/* xoris */
1665 		imm = (unsigned short) word;
1666 		op->val = regs->gpr[rd] ^ (imm << 16);
1667 		goto logical_done_nocc;
1668 
1669 	case 28:	/* andi. */
1670 		op->val = regs->gpr[rd] & (unsigned short) word;
1671 		set_cr0(regs, op);
1672 		goto logical_done_nocc;
1673 
1674 	case 29:	/* andis. */
1675 		imm = (unsigned short) word;
1676 		op->val = regs->gpr[rd] & (imm << 16);
1677 		set_cr0(regs, op);
1678 		goto logical_done_nocc;
1679 
1680 #ifdef __powerpc64__
1681 	case 30:	/* rld* */
1682 		mb = ((word >> 6) & 0x1f) | (word & 0x20);
1683 		val = regs->gpr[rd];
1684 		if ((word & 0x10) == 0) {
1685 			sh = rb | ((word & 2) << 4);
1686 			val = ROTATE(val, sh);
1687 			switch ((word >> 2) & 3) {
1688 			case 0:		/* rldicl */
1689 				val &= MASK64_L(mb);
1690 				break;
1691 			case 1:		/* rldicr */
1692 				val &= MASK64_R(mb);
1693 				break;
1694 			case 2:		/* rldic */
1695 				val &= MASK64(mb, 63 - sh);
1696 				break;
1697 			case 3:		/* rldimi */
1698 				imm = MASK64(mb, 63 - sh);
1699 				val = (regs->gpr[ra] & ~imm) |
1700 					(val & imm);
1701 			}
1702 			op->val = val;
1703 			goto logical_done;
1704 		} else {
1705 			sh = regs->gpr[rb] & 0x3f;
1706 			val = ROTATE(val, sh);
1707 			switch ((word >> 1) & 7) {
1708 			case 0:		/* rldcl */
1709 				op->val = val & MASK64_L(mb);
1710 				goto logical_done;
1711 			case 1:		/* rldcr */
1712 				op->val = val & MASK64_R(mb);
1713 				goto logical_done;
1714 			}
1715 		}
1716 #endif
1717 		op->type = UNKNOWN;	/* illegal instruction */
1718 		return 0;
1719 
1720 	case 31:
1721 		/* isel occupies 32 minor opcodes */
1722 		if (((word >> 1) & 0x1f) == 15) {
1723 			mb = (word >> 6) & 0x1f; /* bc field */
1724 			val = (regs->ccr >> (31 - mb)) & 1;
1725 			val2 = (ra) ? regs->gpr[ra] : 0;
1726 
1727 			op->val = (val) ? val2 : regs->gpr[rb];
1728 			goto compute_done;
1729 		}
1730 
1731 		switch ((word >> 1) & 0x3ff) {
1732 		case 4:		/* tw */
1733 			if (rd == 0x1f ||
1734 			    (rd & trap_compare((int)regs->gpr[ra],
1735 					       (int)regs->gpr[rb])))
1736 				goto trap;
1737 			return 1;
1738 #ifdef __powerpc64__
1739 		case 68:	/* td */
1740 			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
1741 				goto trap;
1742 			return 1;
1743 #endif
1744 		case 83:	/* mfmsr */
1745 			if (regs->msr & MSR_PR)
1746 				goto priv;
1747 			op->type = MFMSR;
1748 			op->reg = rd;
1749 			return 0;
1750 		case 146:	/* mtmsr */
1751 			if (regs->msr & MSR_PR)
1752 				goto priv;
1753 			op->type = MTMSR;
1754 			op->reg = rd;
1755 			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
1756 			return 0;
1757 #ifdef CONFIG_PPC64
1758 		case 178:	/* mtmsrd */
1759 			if (regs->msr & MSR_PR)
1760 				goto priv;
1761 			op->type = MTMSR;
1762 			op->reg = rd;
1763 			/* only MSR_EE and MSR_RI get changed if bit 15 set */
1764 			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
1765 			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
1766 			op->val = imm;
1767 			return 0;
1768 #endif
1769 
1770 		case 19:	/* mfcr */
1771 			imm = 0xffffffffUL;
1772 			if ((word >> 20) & 1) {
1773 				imm = 0xf0000000UL;
1774 				for (sh = 0; sh < 8; ++sh) {
1775 					if (word & (0x80000 >> sh))
1776 						break;
1777 					imm >>= 4;
1778 				}
1779 			}
1780 			op->val = regs->ccr & imm;
1781 			goto compute_done;
1782 
1783 		case 128:	/* setb */
1784 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1785 				goto unknown_opcode;
1786 			/*
1787 			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
1788 			 * Since each CR field is 4 bits,
1789 			 * we can simply mask off the bottom two bits (bfa * 4)
1790 			 * to yield the first bit in the CR field.
1791 			 */
1792 			ra = ra & ~0x3;
1793 			/* 'val' stores bits of the CR field (bfa) */
1794 			val = regs->ccr >> (CR0_SHIFT - ra);
1795 			/* checks if the LT bit of CR field (bfa) is set */
1796 			if (val & 8)
1797 				op->val = -1;
1798 			/* checks if the GT bit of CR field (bfa) is set */
1799 			else if (val & 4)
1800 				op->val = 1;
1801 			else
1802 				op->val = 0;
1803 			goto compute_done;
1804 
1805 		case 144:	/* mtcrf */
1806 			op->type = COMPUTE + SETCC;
1807 			imm = 0xf0000000UL;
1808 			val = regs->gpr[rd];
1809 			op->ccval = regs->ccr;
1810 			for (sh = 0; sh < 8; ++sh) {
1811 				if (word & (0x80000 >> sh))
1812 					op->ccval = (op->ccval & ~imm) |
1813 						(val & imm);
1814 				imm >>= 4;
1815 			}
1816 			return 1;
1817 
1818 		case 339:	/* mfspr */
1819 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1820 			op->type = MFSPR;
1821 			op->reg = rd;
1822 			op->spr = spr;
1823 			if (spr == SPRN_XER || spr == SPRN_LR ||
1824 			    spr == SPRN_CTR)
1825 				return 1;
1826 			return 0;
1827 
1828 		case 467:	/* mtspr */
1829 			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
1830 			op->type = MTSPR;
1831 			op->val = regs->gpr[rd];
1832 			op->spr = spr;
1833 			if (spr == SPRN_XER || spr == SPRN_LR ||
1834 			    spr == SPRN_CTR)
1835 				return 1;
1836 			return 0;
1837 
1838 /*
1839  * Compare instructions
1840  */
1841 		case 0:	/* cmp */
1842 			val = regs->gpr[ra];
1843 			val2 = regs->gpr[rb];
1844 #ifdef __powerpc64__
1845 			if ((rd & 1) == 0) {
1846 				/* word (32-bit) compare */
1847 				val = (int) val;
1848 				val2 = (int) val2;
1849 			}
1850 #endif
1851 			do_cmp_signed(regs, op, val, val2, rd >> 2);
1852 			return 1;
1853 
1854 		case 32:	/* cmpl */
1855 			val = regs->gpr[ra];
1856 			val2 = regs->gpr[rb];
1857 #ifdef __powerpc64__
1858 			if ((rd & 1) == 0) {
1859 				/* word (32-bit) compare */
1860 				val = (unsigned int) val;
1861 				val2 = (unsigned int) val2;
1862 			}
1863 #endif
1864 			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
1865 			return 1;
1866 
1867 		case 508: /* cmpb */
1868 			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
1869 			goto logical_done_nocc;
1870 
1871 /*
1872  * Arithmetic instructions
1873  */
1874 		case 8:	/* subfc */
1875 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1876 				       regs->gpr[rb], 1);
1877 			goto arith_done;
1878 #ifdef __powerpc64__
1879 		case 9:	/* mulhdu */
1880 			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
1881 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1882 			goto arith_done;
1883 #endif
1884 		case 10:	/* addc */
1885 			add_with_carry(regs, op, rd, regs->gpr[ra],
1886 				       regs->gpr[rb], 0);
1887 			goto arith_done;
1888 
1889 		case 11:	/* mulhwu */
1890 			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
1891 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1892 			goto arith_done;
1893 
1894 		case 40:	/* subf */
1895 			op->val = regs->gpr[rb] - regs->gpr[ra];
1896 			goto arith_done;
1897 #ifdef __powerpc64__
1898 		case 73:	/* mulhd */
1899 			asm("mulhd %0,%1,%2" : "=r" (op->val) :
1900 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1901 			goto arith_done;
1902 #endif
1903 		case 75:	/* mulhw */
1904 			asm("mulhw %0,%1,%2" : "=r" (op->val) :
1905 			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1906 			goto arith_done;
1907 
1908 		case 104:	/* neg */
1909 			op->val = -regs->gpr[ra];
1910 			goto arith_done;
1911 
1912 		case 136:	/* subfe */
1913 			add_with_carry(regs, op, rd, ~regs->gpr[ra],
1914 				       regs->gpr[rb], regs->xer & XER_CA);
1915 			goto arith_done;
1916 
1917 		case 138:	/* adde */
1918 			add_with_carry(regs, op, rd, regs->gpr[ra],
1919 				       regs->gpr[rb], regs->xer & XER_CA);
1920 			goto arith_done;
1921 
1922 		case 200:	/* subfze */
1923 			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
1924 				       regs->xer & XER_CA);
1925 			goto arith_done;
1926 
1927 		case 202:	/* addze */
1928 			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
1929 				       regs->xer & XER_CA);
1930 			goto arith_done;
1931 
1932 		case 232:	/* subfme */
1933 			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
1934 				       regs->xer & XER_CA);
1935 			goto arith_done;
1936 #ifdef __powerpc64__
1937 		case 233:	/* mulld */
1938 			op->val = regs->gpr[ra] * regs->gpr[rb];
1939 			goto arith_done;
1940 #endif
1941 		case 234:	/* addme */
1942 			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
1943 				       regs->xer & XER_CA);
1944 			goto arith_done;
1945 
1946 		case 235:	/* mullw */
1947 			op->val = (long)(int) regs->gpr[ra] *
1948 				(int) regs->gpr[rb];
1949 
1950 			goto arith_done;
1951 #ifdef __powerpc64__
1952 		case 265:	/* modud */
1953 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1954 				goto unknown_opcode;
1955 			op->val = regs->gpr[ra] % regs->gpr[rb];
1956 			goto compute_done;
1957 #endif
1958 		case 266:	/* add */
1959 			op->val = regs->gpr[ra] + regs->gpr[rb];
1960 			goto arith_done;
1961 
1962 		case 267:	/* moduw */
1963 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
1964 				goto unknown_opcode;
1965 			op->val = (unsigned int) regs->gpr[ra] %
1966 				(unsigned int) regs->gpr[rb];
1967 			goto compute_done;
1968 #ifdef __powerpc64__
1969 		case 457:	/* divdu */
1970 			op->val = regs->gpr[ra] / regs->gpr[rb];
1971 			goto arith_done;
1972 #endif
1973 		case 459:	/* divwu */
1974 			op->val = (unsigned int) regs->gpr[ra] /
1975 				(unsigned int) regs->gpr[rb];
1976 			goto arith_done;
1977 #ifdef __powerpc64__
1978 		case 489:	/* divd */
1979 			op->val = (long int) regs->gpr[ra] /
1980 				(long int) regs->gpr[rb];
1981 			goto arith_done;
1982 #endif
1983 		case 491:	/* divw */
1984 			op->val = (int) regs->gpr[ra] /
1985 				(int) regs->gpr[rb];
1986 			goto arith_done;
1987 #ifdef __powerpc64__
1988 		case 425:	/* divde[.] */
1989 			asm volatile(PPC_DIVDE(%0, %1, %2) :
1990 				"=r" (op->val) : "r" (regs->gpr[ra]),
1991 				"r" (regs->gpr[rb]));
1992 			goto arith_done;
1993 		case 393:	/* divdeu[.] */
1994 			asm volatile(PPC_DIVDEU(%0, %1, %2) :
1995 				"=r" (op->val) : "r" (regs->gpr[ra]),
1996 				"r" (regs->gpr[rb]));
1997 			goto arith_done;
1998 #endif
1999 		case 755:	/* darn */
2000 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2001 				goto unknown_opcode;
2002 			switch (ra & 0x3) {
2003 			case 0:
2004 				/* 32-bit conditioned */
2005 				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
2006 				goto compute_done;
2007 
2008 			case 1:
2009 				/* 64-bit conditioned */
2010 				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
2011 				goto compute_done;
2012 
2013 			case 2:
2014 				/* 64-bit raw */
2015 				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
2016 				goto compute_done;
2017 			}
2018 
2019 			goto unknown_opcode;
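		/*
		 * Per ISA v3.0, L = 3 is a reserved form, hence unknown_opcode.
		 * Note also that darn returns all-ones when no random number is
		 * available, so callers are expected to check for that value
		 * and retry.
		 */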
2020 #ifdef __powerpc64__
2021 		case 777:	/* modsd */
2022 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2023 				goto unknown_opcode;
2024 			op->val = (long int) regs->gpr[ra] %
2025 				(long int) regs->gpr[rb];
2026 			goto compute_done;
2027 #endif
2028 		case 779:	/* modsw */
2029 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2030 				goto unknown_opcode;
2031 			op->val = (int) regs->gpr[ra] %
2032 				(int) regs->gpr[rb];
2033 			goto compute_done;
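		/*
		 * Like C's % operator, modsw/modsd truncate toward zero, so
		 * the remainder takes the sign of the dividend.  Illustrative
		 * values: ra = -7, rb = 3 gives -1; ra = 7, rb = -3 gives 1.
		 */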
2034 
2035 
2036 /*
2037  * Logical instructions
2038  */
2039 		case 26:	/* cntlzw */
2040 			val = (unsigned int) regs->gpr[rd];
2041 			op->val = ( val ? __builtin_clz(val) : 32 );
2042 			goto logical_done;
2043 #ifdef __powerpc64__
2044 		case 58:	/* cntlzd */
2045 			val = regs->gpr[rd];
2046 			op->val = ( val ? __builtin_clzl(val) : 64 );
2047 			goto logical_done;
2048 #endif
2049 		case 28:	/* and */
2050 			op->val = regs->gpr[rd] & regs->gpr[rb];
2051 			goto logical_done;
2052 
2053 		case 60:	/* andc */
2054 			op->val = regs->gpr[rd] & ~regs->gpr[rb];
2055 			goto logical_done;
2056 
2057 		case 122:	/* popcntb */
2058 			do_popcnt(regs, op, regs->gpr[rd], 8);
2059 			goto logical_done_nocc;
2060 
2061 		case 124:	/* nor */
2062 			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
2063 			goto logical_done;
2064 
2065 		case 154:	/* prtyw */
2066 			do_prty(regs, op, regs->gpr[rd], 32);
2067 			goto logical_done_nocc;
2068 
2069 		case 186:	/* prtyd */
2070 			do_prty(regs, op, regs->gpr[rd], 64);
2071 			goto logical_done_nocc;
2072 #ifdef CONFIG_PPC64
2073 		case 252:	/* bpermd */
2074 			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
2075 			goto logical_done_nocc;
2076 #endif
2077 		case 284:	/* eqv */
2078 			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
2079 			goto logical_done;
2080 
2081 		case 316:	/* xor */
2082 			op->val = regs->gpr[rd] ^ regs->gpr[rb];
2083 			goto logical_done;
2084 
2085 		case 378:	/* popcntw */
2086 			do_popcnt(regs, op, regs->gpr[rd], 32);
2087 			goto logical_done_nocc;
2088 
2089 		case 412:	/* orc */
2090 			op->val = regs->gpr[rd] | ~regs->gpr[rb];
2091 			goto logical_done;
2092 
2093 		case 444:	/* or */
2094 			op->val = regs->gpr[rd] | regs->gpr[rb];
2095 			goto logical_done;
2096 
2097 		case 476:	/* nand */
2098 			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
2099 			goto logical_done;
2100 #ifdef CONFIG_PPC64
2101 		case 506:	/* popcntd */
2102 			do_popcnt(regs, op, regs->gpr[rd], 64);
2103 			goto logical_done_nocc;
2104 #endif
2105 		case 538:	/* cnttzw */
2106 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2107 				goto unknown_opcode;
2108 			val = (unsigned int) regs->gpr[rd];
2109 			op->val = (val ? __builtin_ctz(val) : 32);
2110 			goto logical_done;
2111 #ifdef __powerpc64__
2112 		case 570:	/* cnttzd */
2113 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2114 				goto unknown_opcode;
2115 			val = regs->gpr[rd];
2116 			op->val = (val ? __builtin_ctzl(val) : 64);
2117 			goto logical_done;
2118 #endif
2119 		case 922:	/* extsh */
2120 			op->val = (signed short) regs->gpr[rd];
2121 			goto logical_done;
2122 
2123 		case 954:	/* extsb */
2124 			op->val = (signed char) regs->gpr[rd];
2125 			goto logical_done;
2126 #ifdef __powerpc64__
2127 		case 986:	/* extsw */
2128 			op->val = (signed int) regs->gpr[rd];
2129 			goto logical_done;
2130 #endif
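		/*
		 * Example for the sign extensions above (illustrative value):
		 * extsb with rd holding 0x80 yields -128 sign-extended to
		 * register width, i.e. 0xffffffffffffff80 on a 64-bit kernel.
		 */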
2131 
2132 /*
2133  * Shift instructions
2134  */
2135 		case 24:	/* slw */
2136 			sh = regs->gpr[rb] & 0x3f;
2137 			if (sh < 32)
2138 				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
2139 			else
2140 				op->val = 0;
2141 			goto logical_done;
2142 
2143 		case 536:	/* srw */
2144 			sh = regs->gpr[rb] & 0x3f;
2145 			if (sh < 32)
2146 				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
2147 			else
2148 				op->val = 0;
2149 			goto logical_done;
2150 
2151 		case 792:	/* sraw */
2152 			op->type = COMPUTE + SETREG + SETXER;
2153 			sh = regs->gpr[rb] & 0x3f;
2154 			ival = (signed int) regs->gpr[rd];
2155 			op->val = ival >> (sh < 32 ? sh : 31);
2156 			op->xerval = regs->xer;
2157 			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
2158 				op->xerval |= XER_CA;
2159 			else
2160 				op->xerval &= ~XER_CA;
2161 			set_ca32(op, op->xerval & XER_CA);
2162 			goto logical_done;
2163 
2164 		case 824:	/* srawi */
2165 			op->type = COMPUTE + SETREG + SETXER;
2166 			sh = rb;
2167 			ival = (signed int) regs->gpr[rd];
2168 			op->val = ival >> sh;
2169 			op->xerval = regs->xer;
2170 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2171 				op->xerval |= XER_CA;
2172 			else
2173 				op->xerval &= ~XER_CA;
2174 			set_ca32(op, op->xerval & XER_CA);
2175 			goto logical_done;
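		/*
		 * Worked example of the CA logic (illustrative values): srawi
		 * with sh = 1 and a source of 0xfffffffb (-5) yields -3 and
		 * sets XER[CA], since the source is negative and a 1 bit was
		 * shifted out.  For non-negative sources CA is always cleared.
		 */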
2176 
2177 #ifdef __powerpc64__
2178 		case 27:	/* sld */
2179 			sh = regs->gpr[rb] & 0x7f;
2180 			if (sh < 64)
2181 				op->val = regs->gpr[rd] << sh;
2182 			else
2183 				op->val = 0;
2184 			goto logical_done;
2185 
2186 		case 539:	/* srd */
2187 			sh = regs->gpr[rb] & 0x7f;
2188 			if (sh < 64)
2189 				op->val = regs->gpr[rd] >> sh;
2190 			else
2191 				op->val = 0;
2192 			goto logical_done;
2193 
2194 		case 794:	/* srad */
2195 			op->type = COMPUTE + SETREG + SETXER;
2196 			sh = regs->gpr[rb] & 0x7f;
2197 			ival = (signed long int) regs->gpr[rd];
2198 			op->val = ival >> (sh < 64 ? sh : 63);
2199 			op->xerval = regs->xer;
2200 			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
2201 				op->xerval |= XER_CA;
2202 			else
2203 				op->xerval &= ~XER_CA;
2204 			set_ca32(op, op->xerval & XER_CA);
2205 			goto logical_done;
2206 
2207 		case 826:	/* sradi with sh_5 = 0 */
2208 		case 827:	/* sradi with sh_5 = 1 */
2209 			op->type = COMPUTE + SETREG + SETXER;
2210 			sh = rb | ((word & 2) << 4);
2211 			ival = (signed long int) regs->gpr[rd];
2212 			op->val = ival >> sh;
2213 			op->xerval = regs->xer;
2214 			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
2215 				op->xerval |= XER_CA;
2216 			else
2217 				op->xerval &= ~XER_CA;
2218 			set_ca32(op, op->xerval & XER_CA);
2219 			goto logical_done;
2220 
2221 		case 890:	/* extswsli with sh_5 = 0 */
2222 		case 891:	/* extswsli with sh_5 = 1 */
2223 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2224 				goto unknown_opcode;
2225 			op->type = COMPUTE + SETREG;
2226 			sh = rb | ((word & 2) << 4);
2227 			val = (signed int) regs->gpr[rd];
2228 			if (sh)
2229 				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
2230 			else
2231 				op->val = val;
2232 			goto logical_done;
2233 
2234 #endif /* __powerpc64__ */
2235 
2236 /*
2237  * Cache instructions
2238  */
2239 		case 54:	/* dcbst */
2240 			op->type = MKOP(CACHEOP, DCBST, 0);
2241 			op->ea = xform_ea(word, regs);
2242 			return 0;
2243 
2244 		case 86:	/* dcbf */
2245 			op->type = MKOP(CACHEOP, DCBF, 0);
2246 			op->ea = xform_ea(word, regs);
2247 			return 0;
2248 
2249 		case 246:	/* dcbtst */
2250 			op->type = MKOP(CACHEOP, DCBTST, 0);
2251 			op->ea = xform_ea(word, regs);
2252 			op->reg = rd;
2253 			return 0;
2254 
2255 		case 278:	/* dcbt */
2256 			op->type = MKOP(CACHEOP, DCBT, 0);
2257 			op->ea = xform_ea(word, regs);
2258 			op->reg = rd;
2259 			return 0;
2260 
2261 		case 982:	/* icbi */
2262 			op->type = MKOP(CACHEOP, ICBI, 0);
2263 			op->ea = xform_ea(word, regs);
2264 			return 0;
2265 
2266 		case 1014:	/* dcbz */
2267 			op->type = MKOP(CACHEOP, DCBZ, 0);
2268 			op->ea = xform_ea(word, regs);
2269 			return 0;
2270 		}
2271 		break;
2272 	}
2273 
2274 /*
2275  * Loads and stores.
2276  */
2277 	op->type = UNKNOWN;
2278 	op->update_reg = ra;
2279 	op->reg = rd;
2280 	op->val = regs->gpr[rd];
2281 	u = (word >> 20) & UPDATE;
2282 	op->vsx_flags = 0;
2283 
2284 	switch (opcode) {
2285 	case 31:
2286 		u = word & UPDATE;
2287 		op->ea = xform_ea(word, regs);
2288 		switch ((word >> 1) & 0x3ff) {
2289 		case 20:	/* lwarx */
2290 			op->type = MKOP(LARX, 0, 4);
2291 			break;
2292 
2293 		case 150:	/* stwcx. */
2294 			op->type = MKOP(STCX, 0, 4);
2295 			break;
2296 
2297 #ifdef CONFIG_PPC_HAS_LBARX_LHARX
2298 		case 52:	/* lbarx */
2299 			op->type = MKOP(LARX, 0, 1);
2300 			break;
2301 
2302 		case 694:	/* stbcx. */
2303 			op->type = MKOP(STCX, 0, 1);
2304 			break;
2305 
2306 		case 116:	/* lharx */
2307 			op->type = MKOP(LARX, 0, 2);
2308 			break;
2309 
2310 		case 726:	/* sthcx. */
2311 			op->type = MKOP(STCX, 0, 2);
2312 			break;
2313 #endif
2314 #ifdef __powerpc64__
2315 		case 84:	/* ldarx */
2316 			op->type = MKOP(LARX, 0, 8);
2317 			break;
2318 
2319 		case 214:	/* stdcx. */
2320 			op->type = MKOP(STCX, 0, 8);
2321 			break;
2322 
2323 		case 276:	/* lqarx */
2324 			if (!((rd & 1) || rd == ra || rd == rb))
2325 				op->type = MKOP(LARX, 0, 16);
2326 			break;
2327 
2328 		case 182:	/* stqcx. */
2329 			if (!(rd & 1))
2330 				op->type = MKOP(STCX, 0, 16);
2331 			break;
2332 #endif
2333 
2334 		case 23:	/* lwzx */
2335 		case 55:	/* lwzux */
2336 			op->type = MKOP(LOAD, u, 4);
2337 			break;
2338 
2339 		case 87:	/* lbzx */
2340 		case 119:	/* lbzux */
2341 			op->type = MKOP(LOAD, u, 1);
2342 			break;
2343 
2344 #ifdef CONFIG_ALTIVEC
2345 		/*
2346 		 * Note: for the load/store vector element instructions,
2347 		 * bits of the EA say which field of the VMX register to use.
2348 		 */
2349 		case 7:		/* lvebx */
2350 			op->type = MKOP(LOAD_VMX, 0, 1);
2351 			op->element_size = 1;
2352 			break;
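		/*
		 * E.g. for lvebx the low four bits of the EA select which of
		 * the 16 byte lanes of the vector register receives the
		 * loaded byte.
		 */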
2353 
2354 		case 39:	/* lvehx */
2355 			op->type = MKOP(LOAD_VMX, 0, 2);
2356 			op->element_size = 2;
2357 			break;
2358 
2359 		case 71:	/* lvewx */
2360 			op->type = MKOP(LOAD_VMX, 0, 4);
2361 			op->element_size = 4;
2362 			break;
2363 
2364 		case 103:	/* lvx */
2365 		case 359:	/* lvxl */
2366 			op->type = MKOP(LOAD_VMX, 0, 16);
2367 			op->element_size = 16;
2368 			break;
2369 
2370 		case 135:	/* stvebx */
2371 			op->type = MKOP(STORE_VMX, 0, 1);
2372 			op->element_size = 1;
2373 			break;
2374 
2375 		case 167:	/* stvehx */
2376 			op->type = MKOP(STORE_VMX, 0, 2);
2377 			op->element_size = 2;
2378 			break;
2379 
2380 		case 199:	/* stvewx */
2381 			op->type = MKOP(STORE_VMX, 0, 4);
2382 			op->element_size = 4;
2383 			break;
2384 
2385 		case 231:	/* stvx */
2386 		case 487:	/* stvxl */
2387 			op->type = MKOP(STORE_VMX, 0, 16);
2388 			break;
2389 #endif /* CONFIG_ALTIVEC */
2390 
2391 #ifdef __powerpc64__
2392 		case 21:	/* ldx */
2393 		case 53:	/* ldux */
2394 			op->type = MKOP(LOAD, u, 8);
2395 			break;
2396 
2397 		case 149:	/* stdx */
2398 		case 181:	/* stdux */
2399 			op->type = MKOP(STORE, u, 8);
2400 			break;
2401 #endif
2402 
2403 		case 151:	/* stwx */
2404 		case 183:	/* stwux */
2405 			op->type = MKOP(STORE, u, 4);
2406 			break;
2407 
2408 		case 215:	/* stbx */
2409 		case 247:	/* stbux */
2410 			op->type = MKOP(STORE, u, 1);
2411 			break;
2412 
2413 		case 279:	/* lhzx */
2414 		case 311:	/* lhzux */
2415 			op->type = MKOP(LOAD, u, 2);
2416 			break;
2417 
2418 #ifdef __powerpc64__
2419 		case 341:	/* lwax */
2420 		case 373:	/* lwaux */
2421 			op->type = MKOP(LOAD, SIGNEXT | u, 4);
2422 			break;
2423 #endif
2424 
2425 		case 343:	/* lhax */
2426 		case 375:	/* lhaux */
2427 			op->type = MKOP(LOAD, SIGNEXT | u, 2);
2428 			break;
2429 
2430 		case 407:	/* sthx */
2431 		case 439:	/* sthux */
2432 			op->type = MKOP(STORE, u, 2);
2433 			break;
2434 
2435 #ifdef __powerpc64__
2436 		case 532:	/* ldbrx */
2437 			op->type = MKOP(LOAD, BYTEREV, 8);
2438 			break;
2439 
2440 #endif
2441 		case 533:	/* lswx */
2442 			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
2443 			break;
2444 
2445 		case 534:	/* lwbrx */
2446 			op->type = MKOP(LOAD, BYTEREV, 4);
2447 			break;
2448 
2449 		case 597:	/* lswi */
2450 			if (rb == 0)
2451 				rb = 32;	/* # bytes to load */
2452 			op->type = MKOP(LOAD_MULTI, 0, rb);
2453 			op->ea = ra ? regs->gpr[ra] : 0;
2454 			break;
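		/*
		 * Example: "lswi r4,r3,0" uses the maximum count of 32 bytes,
		 * filling r4..r11 with four bytes per register from the
		 * address in r3.
		 */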
2455 
2456 #ifdef CONFIG_PPC_FPU
2457 		case 535:	/* lfsx */
2458 		case 567:	/* lfsux */
2459 			op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2460 			break;
2461 
2462 		case 599:	/* lfdx */
2463 		case 631:	/* lfdux */
2464 			op->type = MKOP(LOAD_FP, u, 8);
2465 			break;
2466 
2467 		case 663:	/* stfsx */
2468 		case 695:	/* stfsux */
2469 			op->type = MKOP(STORE_FP, u | FPCONV, 4);
2470 			break;
2471 
2472 		case 727:	/* stfdx */
2473 		case 759:	/* stfdux */
2474 			op->type = MKOP(STORE_FP, u, 8);
2475 			break;
2476 
2477 #ifdef __powerpc64__
2478 		case 791:	/* lfdpx */
2479 			op->type = MKOP(LOAD_FP, 0, 16);
2480 			break;
2481 
2482 		case 855:	/* lfiwax */
2483 			op->type = MKOP(LOAD_FP, SIGNEXT, 4);
2484 			break;
2485 
2486 		case 887:	/* lfiwzx */
2487 			op->type = MKOP(LOAD_FP, 0, 4);
2488 			break;
2489 
2490 		case 919:	/* stfdpx */
2491 			op->type = MKOP(STORE_FP, 0, 16);
2492 			break;
2493 
2494 		case 983:	/* stfiwx */
2495 			op->type = MKOP(STORE_FP, 0, 4);
2496 			break;
2497 #endif /* __powerpc64__ */
2498 #endif /* CONFIG_PPC_FPU */
2499 
2500 #ifdef __powerpc64__
2501 		case 660:	/* stdbrx */
2502 			op->type = MKOP(STORE, BYTEREV, 8);
2503 			op->val = byterev_8(regs->gpr[rd]);
2504 			break;
2505 
2506 #endif
2507 		case 661:	/* stswx */
2508 			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
2509 			break;
2510 
2511 		case 662:	/* stwbrx */
2512 			op->type = MKOP(STORE, BYTEREV, 4);
2513 			op->val = byterev_4(regs->gpr[rd]);
2514 			break;
2515 
2516 		case 725:	/* stswi */
2517 			if (rb == 0)
2518 				rb = 32;	/* # bytes to store */
2519 			op->type = MKOP(STORE_MULTI, 0, rb);
2520 			op->ea = ra ? regs->gpr[ra] : 0;
2521 			break;
2522 
2523 		case 790:	/* lhbrx */
2524 			op->type = MKOP(LOAD, BYTEREV, 2);
2525 			break;
2526 
2527 		case 918:	/* sthbrx */
2528 			op->type = MKOP(STORE, BYTEREV, 2);
2529 			op->val = byterev_2(regs->gpr[rd]);
2530 			break;
2531 
2532 #ifdef CONFIG_VSX
2533 		case 12:	/* lxsiwzx */
2534 			op->reg = rd | ((word & 1) << 5);
2535 			op->type = MKOP(LOAD_VSX, 0, 4);
2536 			op->element_size = 8;
2537 			break;
2538 
2539 		case 76:	/* lxsiwax */
2540 			op->reg = rd | ((word & 1) << 5);
2541 			op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
2542 			op->element_size = 8;
2543 			break;
2544 
2545 		case 140:	/* stxsiwx */
2546 			op->reg = rd | ((word & 1) << 5);
2547 			op->type = MKOP(STORE_VSX, 0, 4);
2548 			op->element_size = 8;
2549 			break;
2550 
2551 		case 268:	/* lxvx */
2552 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2553 				goto unknown_opcode;
2554 			op->reg = rd | ((word & 1) << 5);
2555 			op->type = MKOP(LOAD_VSX, 0, 16);
2556 			op->element_size = 16;
2557 			op->vsx_flags = VSX_CHECK_VEC;
2558 			break;
2559 
2560 		case 269:	/* lxvl */
2561 		case 301: {	/* lxvll */
2562 			int nb;
2563 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2564 				goto unknown_opcode;
2565 			op->reg = rd | ((word & 1) << 5);
2566 			op->ea = ra ? regs->gpr[ra] : 0;
2567 			nb = regs->gpr[rb] & 0xff;
2568 			if (nb > 16)
2569 				nb = 16;
2570 			op->type = MKOP(LOAD_VSX, 0, nb);
2571 			op->element_size = 16;
2572 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2573 				VSX_CHECK_VEC;
2574 			break;
2575 		}
2576 		case 332:	/* lxvdsx */
2577 			op->reg = rd | ((word & 1) << 5);
2578 			op->type = MKOP(LOAD_VSX, 0, 8);
2579 			op->element_size = 8;
2580 			op->vsx_flags = VSX_SPLAT;
2581 			break;
2582 
2583 		case 333:       /* lxvpx */
2584 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2585 				goto unknown_opcode;
2586 			op->reg = VSX_REGISTER_XTP(rd);
2587 			op->type = MKOP(LOAD_VSX, 0, 32);
2588 			op->element_size = 32;
2589 			break;
2590 
2591 		case 364:	/* lxvwsx */
2592 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2593 				goto unknown_opcode;
2594 			op->reg = rd | ((word & 1) << 5);
2595 			op->type = MKOP(LOAD_VSX, 0, 4);
2596 			op->element_size = 4;
2597 			op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
2598 			break;
2599 
2600 		case 396:	/* stxvx */
2601 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2602 				goto unknown_opcode;
2603 			op->reg = rd | ((word & 1) << 5);
2604 			op->type = MKOP(STORE_VSX, 0, 16);
2605 			op->element_size = 16;
2606 			op->vsx_flags = VSX_CHECK_VEC;
2607 			break;
2608 
2609 		case 397:	/* stxvl */
2610 		case 429: {	/* stxvll */
2611 			int nb;
2612 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2613 				goto unknown_opcode;
2614 			op->reg = rd | ((word & 1) << 5);
2615 			op->ea = ra ? regs->gpr[ra] : 0;
2616 			nb = regs->gpr[rb] & 0xff;
2617 			if (nb > 16)
2618 				nb = 16;
2619 			op->type = MKOP(STORE_VSX, 0, nb);
2620 			op->element_size = 16;
2621 			op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
2622 				VSX_CHECK_VEC;
2623 			break;
2624 		}
2625 		case 461:       /* stxvpx */
2626 			if (!cpu_has_feature(CPU_FTR_ARCH_31))
2627 				goto unknown_opcode;
2628 			op->reg = VSX_REGISTER_XTP(rd);
2629 			op->type = MKOP(STORE_VSX, 0, 32);
2630 			op->element_size = 32;
2631 			break;
2632 		case 524:	/* lxsspx */
2633 			op->reg = rd | ((word & 1) << 5);
2634 			op->type = MKOP(LOAD_VSX, 0, 4);
2635 			op->element_size = 8;
2636 			op->vsx_flags = VSX_FPCONV;
2637 			break;
2638 
2639 		case 588:	/* lxsdx */
2640 			op->reg = rd | ((word & 1) << 5);
2641 			op->type = MKOP(LOAD_VSX, 0, 8);
2642 			op->element_size = 8;
2643 			break;
2644 
2645 		case 652:	/* stxsspx */
2646 			op->reg = rd | ((word & 1) << 5);
2647 			op->type = MKOP(STORE_VSX, 0, 4);
2648 			op->element_size = 8;
2649 			op->vsx_flags = VSX_FPCONV;
2650 			break;
2651 
2652 		case 716:	/* stxsdx */
2653 			op->reg = rd | ((word & 1) << 5);
2654 			op->type = MKOP(STORE_VSX, 0, 8);
2655 			op->element_size = 8;
2656 			break;
2657 
2658 		case 780:	/* lxvw4x */
2659 			op->reg = rd | ((word & 1) << 5);
2660 			op->type = MKOP(LOAD_VSX, 0, 16);
2661 			op->element_size = 4;
2662 			break;
2663 
2664 		case 781:	/* lxsibzx */
2665 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2666 				goto unknown_opcode;
2667 			op->reg = rd | ((word & 1) << 5);
2668 			op->type = MKOP(LOAD_VSX, 0, 1);
2669 			op->element_size = 8;
2670 			op->vsx_flags = VSX_CHECK_VEC;
2671 			break;
2672 
2673 		case 812:	/* lxvh8x */
2674 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2675 				goto unknown_opcode;
2676 			op->reg = rd | ((word & 1) << 5);
2677 			op->type = MKOP(LOAD_VSX, 0, 16);
2678 			op->element_size = 2;
2679 			op->vsx_flags = VSX_CHECK_VEC;
2680 			break;
2681 
2682 		case 813:	/* lxsihzx */
2683 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2684 				goto unknown_opcode;
2685 			op->reg = rd | ((word & 1) << 5);
2686 			op->type = MKOP(LOAD_VSX, 0, 2);
2687 			op->element_size = 8;
2688 			op->vsx_flags = VSX_CHECK_VEC;
2689 			break;
2690 
2691 		case 844:	/* lxvd2x */
2692 			op->reg = rd | ((word & 1) << 5);
2693 			op->type = MKOP(LOAD_VSX, 0, 16);
2694 			op->element_size = 8;
2695 			break;
2696 
2697 		case 876:	/* lxvb16x */
2698 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2699 				goto unknown_opcode;
2700 			op->reg = rd | ((word & 1) << 5);
2701 			op->type = MKOP(LOAD_VSX, 0, 16);
2702 			op->element_size = 1;
2703 			op->vsx_flags = VSX_CHECK_VEC;
2704 			break;
2705 
2706 		case 908:	/* stxvw4x */
2707 			op->reg = rd | ((word & 1) << 5);
2708 			op->type = MKOP(STORE_VSX, 0, 16);
2709 			op->element_size = 4;
2710 			break;
2711 
2712 		case 909:	/* stxsibx */
2713 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2714 				goto unknown_opcode;
2715 			op->reg = rd | ((word & 1) << 5);
2716 			op->type = MKOP(STORE_VSX, 0, 1);
2717 			op->element_size = 8;
2718 			op->vsx_flags = VSX_CHECK_VEC;
2719 			break;
2720 
2721 		case 940:	/* stxvh8x */
2722 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2723 				goto unknown_opcode;
2724 			op->reg = rd | ((word & 1) << 5);
2725 			op->type = MKOP(STORE_VSX, 0, 16);
2726 			op->element_size = 2;
2727 			op->vsx_flags = VSX_CHECK_VEC;
2728 			break;
2729 
2730 		case 941:	/* stxsihx */
2731 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2732 				goto unknown_opcode;
2733 			op->reg = rd | ((word & 1) << 5);
2734 			op->type = MKOP(STORE_VSX, 0, 2);
2735 			op->element_size = 8;
2736 			op->vsx_flags = VSX_CHECK_VEC;
2737 			break;
2738 
2739 		case 972:	/* stxvd2x */
2740 			op->reg = rd | ((word & 1) << 5);
2741 			op->type = MKOP(STORE_VSX, 0, 16);
2742 			op->element_size = 8;
2743 			break;
2744 
2745 		case 1004:	/* stxvb16x */
2746 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2747 				goto unknown_opcode;
2748 			op->reg = rd | ((word & 1) << 5);
2749 			op->type = MKOP(STORE_VSX, 0, 16);
2750 			op->element_size = 1;
2751 			op->vsx_flags = VSX_CHECK_VEC;
2752 			break;
2753 
2754 #endif /* CONFIG_VSX */
2755 		}
2756 		break;
2757 
2758 	case 32:	/* lwz */
2759 	case 33:	/* lwzu */
2760 		op->type = MKOP(LOAD, u, 4);
2761 		op->ea = dform_ea(word, regs);
2762 		break;
2763 
2764 	case 34:	/* lbz */
2765 	case 35:	/* lbzu */
2766 		op->type = MKOP(LOAD, u, 1);
2767 		op->ea = dform_ea(word, regs);
2768 		break;
2769 
2770 	case 36:	/* stw */
2771 	case 37:	/* stwu */
2772 		op->type = MKOP(STORE, u, 4);
2773 		op->ea = dform_ea(word, regs);
2774 		break;
2775 
2776 	case 38:	/* stb */
2777 	case 39:	/* stbu */
2778 		op->type = MKOP(STORE, u, 1);
2779 		op->ea = dform_ea(word, regs);
2780 		break;
2781 
2782 	case 40:	/* lhz */
2783 	case 41:	/* lhzu */
2784 		op->type = MKOP(LOAD, u, 2);
2785 		op->ea = dform_ea(word, regs);
2786 		break;
2787 
2788 	case 42:	/* lha */
2789 	case 43:	/* lhau */
2790 		op->type = MKOP(LOAD, SIGNEXT | u, 2);
2791 		op->ea = dform_ea(word, regs);
2792 		break;
2793 
2794 	case 44:	/* sth */
2795 	case 45:	/* sthu */
2796 		op->type = MKOP(STORE, u, 2);
2797 		op->ea = dform_ea(word, regs);
2798 		break;
2799 
2800 	case 46:	/* lmw */
2801 		if (ra >= rd)
2802 			break;		/* invalid form, ra in range to load */
2803 		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
2804 		op->ea = dform_ea(word, regs);
2805 		break;
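	/*
	 * Example: "lmw r29,0(r3)" transfers 4 * (32 - 29) = 12 bytes into
	 * r29, r30 and r31.  The ra >= rd check above rejects the invalid
	 * form where ra itself would be overwritten by the load.
	 */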
2806 
2807 	case 47:	/* stmw */
2808 		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
2809 		op->ea = dform_ea(word, regs);
2810 		break;
2811 
2812 #ifdef CONFIG_PPC_FPU
2813 	case 48:	/* lfs */
2814 	case 49:	/* lfsu */
2815 		op->type = MKOP(LOAD_FP, u | FPCONV, 4);
2816 		op->ea = dform_ea(word, regs);
2817 		break;
2818 
2819 	case 50:	/* lfd */
2820 	case 51:	/* lfdu */
2821 		op->type = MKOP(LOAD_FP, u, 8);
2822 		op->ea = dform_ea(word, regs);
2823 		break;
2824 
2825 	case 52:	/* stfs */
2826 	case 53:	/* stfsu */
2827 		op->type = MKOP(STORE_FP, u | FPCONV, 4);
2828 		op->ea = dform_ea(word, regs);
2829 		break;
2830 
2831 	case 54:	/* stfd */
2832 	case 55:	/* stfdu */
2833 		op->type = MKOP(STORE_FP, u, 8);
2834 		op->ea = dform_ea(word, regs);
2835 		break;
2836 #endif
2837 
2838 #ifdef __powerpc64__
2839 	case 56:	/* lq */
2840 		if (!((rd & 1) || (rd == ra)))
2841 			op->type = MKOP(LOAD, 0, 16);
2842 		op->ea = dqform_ea(word, regs);
2843 		break;
2844 #endif
2845 
2846 #ifdef CONFIG_VSX
2847 	case 57:	/* lfdp, lxsd, lxssp */
2848 		op->ea = dsform_ea(word, regs);
2849 		switch (word & 3) {
2850 		case 0:		/* lfdp */
2851 			if (rd & 1)
2852 				break;		/* reg must be even */
2853 			op->type = MKOP(LOAD_FP, 0, 16);
2854 			break;
2855 		case 2:		/* lxsd */
2856 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2857 				goto unknown_opcode;
2858 			op->reg = rd + 32;
2859 			op->type = MKOP(LOAD_VSX, 0, 8);
2860 			op->element_size = 8;
2861 			op->vsx_flags = VSX_CHECK_VEC;
2862 			break;
2863 		case 3:		/* lxssp */
2864 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2865 				goto unknown_opcode;
2866 			op->reg = rd + 32;
2867 			op->type = MKOP(LOAD_VSX, 0, 4);
2868 			op->element_size = 8;
2869 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2870 			break;
2871 		}
2872 		break;
2873 #endif /* CONFIG_VSX */
2874 
2875 #ifdef __powerpc64__
2876 	case 58:	/* ld[u], lwa */
2877 		op->ea = dsform_ea(word, regs);
2878 		switch (word & 3) {
2879 		case 0:		/* ld */
2880 			op->type = MKOP(LOAD, 0, 8);
2881 			break;
2882 		case 1:		/* ldu */
2883 			op->type = MKOP(LOAD, UPDATE, 8);
2884 			break;
2885 		case 2:		/* lwa */
2886 			op->type = MKOP(LOAD, SIGNEXT, 4);
2887 			break;
2888 		}
2889 		break;
2890 #endif
2891 
2892 #ifdef CONFIG_VSX
2893 	case 6:
2894 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2895 			goto unknown_opcode;
2896 		op->ea = dqform_ea(word, regs);
2897 		op->reg = VSX_REGISTER_XTP(rd);
2898 		op->element_size = 32;
2899 		switch (word & 0xf) {
2900 		case 0:         /* lxvp */
2901 			op->type = MKOP(LOAD_VSX, 0, 32);
2902 			break;
2903 		case 1:         /* stxvp */
2904 			op->type = MKOP(STORE_VSX, 0, 32);
2905 			break;
2906 		}
2907 		break;
2908 
2909 	case 61:	/* stfdp, lxv, stxsd, stxssp, stxv */
2910 		switch (word & 7) {
2911 		case 0:		/* stfdp with LSB of DS field = 0 */
2912 		case 4:		/* stfdp with LSB of DS field = 1 */
2913 			op->ea = dsform_ea(word, regs);
2914 			op->type = MKOP(STORE_FP, 0, 16);
2915 			break;
2916 
2917 		case 1:		/* lxv */
2918 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2919 				goto unknown_opcode;
2920 			op->ea = dqform_ea(word, regs);
2921 			if (word & 8)
2922 				op->reg = rd + 32;
2923 			op->type = MKOP(LOAD_VSX, 0, 16);
2924 			op->element_size = 16;
2925 			op->vsx_flags = VSX_CHECK_VEC;
2926 			break;
2927 
2928 		case 2:		/* stxsd with LSB of DS field = 0 */
2929 		case 6:		/* stxsd with LSB of DS field = 1 */
2930 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2931 				goto unknown_opcode;
2932 			op->ea = dsform_ea(word, regs);
2933 			op->reg = rd + 32;
2934 			op->type = MKOP(STORE_VSX, 0, 8);
2935 			op->element_size = 8;
2936 			op->vsx_flags = VSX_CHECK_VEC;
2937 			break;
2938 
2939 		case 3:		/* stxssp with LSB of DS field = 0 */
2940 		case 7:		/* stxssp with LSB of DS field = 1 */
2941 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2942 				goto unknown_opcode;
2943 			op->ea = dsform_ea(word, regs);
2944 			op->reg = rd + 32;
2945 			op->type = MKOP(STORE_VSX, 0, 4);
2946 			op->element_size = 8;
2947 			op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
2948 			break;
2949 
2950 		case 5:		/* stxv */
2951 			if (!cpu_has_feature(CPU_FTR_ARCH_300))
2952 				goto unknown_opcode;
2953 			op->ea = dqform_ea(word, regs);
2954 			if (word & 8)
2955 				op->reg = rd + 32;
2956 			op->type = MKOP(STORE_VSX, 0, 16);
2957 			op->element_size = 16;
2958 			op->vsx_flags = VSX_CHECK_VEC;
2959 			break;
2960 		}
2961 		break;
2962 #endif /* CONFIG_VSX */
2963 
2964 #ifdef __powerpc64__
2965 	case 62:	/* std[u] */
2966 		op->ea = dsform_ea(word, regs);
2967 		switch (word & 3) {
2968 		case 0:		/* std */
2969 			op->type = MKOP(STORE, 0, 8);
2970 			break;
2971 		case 1:		/* stdu */
2972 			op->type = MKOP(STORE, UPDATE, 8);
2973 			break;
2974 		case 2:		/* stq */
2975 			if (!(rd & 1))
2976 				op->type = MKOP(STORE, 0, 16);
2977 			break;
2978 		}
2979 		break;
2980 	case 1: /* Prefixed instructions */
2981 		if (!cpu_has_feature(CPU_FTR_ARCH_31))
2982 			goto unknown_opcode;
2983 
2984 		prefix_r = GET_PREFIX_R(word);
2985 		ra = GET_PREFIX_RA(suffix);
2986 		op->update_reg = ra;
2987 		rd = (suffix >> 21) & 0x1f;
2988 		op->reg = rd;
2989 		op->val = regs->gpr[rd];
2990 
2991 		suffixopcode = get_op(suffix);
2992 		prefixtype = (word >> 24) & 0x3;
2993 		switch (prefixtype) {
2994 		case 0: /* Type 00  Eight-Byte Load/Store */
2995 			if (prefix_r && ra)
2996 				break;
2997 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
2998 			switch (suffixopcode) {
2999 			case 41:	/* plwa */
3000 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
3001 				break;
3002 #ifdef CONFIG_VSX
3003 			case 42:        /* plxsd */
3004 				op->reg = rd + 32;
3005 				op->type = MKOP(LOAD_VSX, PREFIXED, 8);
3006 				op->element_size = 8;
3007 				op->vsx_flags = VSX_CHECK_VEC;
3008 				break;
3009 			case 43:	/* plxssp */
3010 				op->reg = rd + 32;
3011 				op->type = MKOP(LOAD_VSX, PREFIXED, 4);
3012 				op->element_size = 8;
3013 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3014 				break;
3015 			case 46:	/* pstxsd */
3016 				op->reg = rd + 32;
3017 				op->type = MKOP(STORE_VSX, PREFIXED, 8);
3018 				op->element_size = 8;
3019 				op->vsx_flags = VSX_CHECK_VEC;
3020 				break;
3021 			case 47:	/* pstxssp */
3022 				op->reg = rd + 32;
3023 				op->type = MKOP(STORE_VSX, PREFIXED, 4);
3024 				op->element_size = 8;
3025 				op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
3026 				break;
3027 			case 51:	/* plxv1 */
3028 				op->reg += 32;
3029 				fallthrough;
3030 			case 50:	/* plxv0 */
3031 				op->type = MKOP(LOAD_VSX, PREFIXED, 16);
3032 				op->element_size = 16;
3033 				op->vsx_flags = VSX_CHECK_VEC;
3034 				break;
3035 			case 55:	/* pstxv1 */
3036 				op->reg = rd + 32;
3037 				fallthrough;
3038 			case 54:	/* pstxv0 */
3039 				op->type = MKOP(STORE_VSX, PREFIXED, 16);
3040 				op->element_size = 16;
3041 				op->vsx_flags = VSX_CHECK_VEC;
3042 				break;
3043 #endif /* CONFIG_VSX */
3044 			case 56:        /* plq */
3045 				op->type = MKOP(LOAD, PREFIXED, 16);
3046 				break;
3047 			case 57:	/* pld */
3048 				op->type = MKOP(LOAD, PREFIXED, 8);
3049 				break;
3050 #ifdef CONFIG_VSX
3051 			case 58:        /* plxvp */
3052 				op->reg = VSX_REGISTER_XTP(rd);
3053 				op->type = MKOP(LOAD_VSX, PREFIXED, 32);
3054 				op->element_size = 32;
3055 				break;
3056 #endif /* CONFIG_VSX */
3057 			case 60:        /* pstq */
3058 				op->type = MKOP(STORE, PREFIXED, 16);
3059 				break;
3060 			case 61:	/* pstd */
3061 				op->type = MKOP(STORE, PREFIXED, 8);
3062 				break;
3063 #ifdef CONFIG_VSX
3064 			case 62:        /* pstxvp */
3065 				op->reg = VSX_REGISTER_XTP(rd);
3066 				op->type = MKOP(STORE_VSX, PREFIXED, 32);
3067 				op->element_size = 32;
3068 				break;
3069 #endif /* CONFIG_VSX */
3070 			}
3071 			break;
3072 		case 1: /* Type 01 Eight-Byte Register-to-Register */
3073 			break;
3074 		case 2: /* Type 10 Modified Load/Store */
3075 			if (prefix_r && ra)
3076 				break;
3077 			op->ea = mlsd_8lsd_ea(word, suffix, regs);
3078 			switch (suffixopcode) {
3079 			case 32:	/* plwz */
3080 				op->type = MKOP(LOAD, PREFIXED, 4);
3081 				break;
3082 			case 34:	/* plbz */
3083 				op->type = MKOP(LOAD, PREFIXED, 1);
3084 				break;
3085 			case 36:	/* pstw */
3086 				op->type = MKOP(STORE, PREFIXED, 4);
3087 				break;
3088 			case 38:	/* pstb */
3089 				op->type = MKOP(STORE, PREFIXED, 1);
3090 				break;
3091 			case 40:	/* plhz */
3092 				op->type = MKOP(LOAD, PREFIXED, 2);
3093 				break;
3094 			case 42:	/* plha */
3095 				op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
3096 				break;
3097 			case 44:	/* psth */
3098 				op->type = MKOP(STORE, PREFIXED, 2);
3099 				break;
3100 			case 48:        /* plfs */
3101 				op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
3102 				break;
3103 			case 50:        /* plfd */
3104 				op->type = MKOP(LOAD_FP, PREFIXED, 8);
3105 				break;
3106 			case 52:        /* pstfs */
3107 				op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
3108 				break;
3109 			case 54:        /* pstfd */
3110 				op->type = MKOP(STORE_FP, PREFIXED, 8);
3111 				break;
3112 			}
3113 			break;
3114 		case 3: /* Type 11 Modified Register-to-Register */
3115 			break;
3116 		}
3117 #endif /* __powerpc64__ */
3118 
3119 	}
3120 
3121 	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
3122 		switch (GETTYPE(op->type)) {
3123 		case LOAD:
3124 			if (ra == rd)
3125 				goto unknown_opcode;
3126 			fallthrough;
3127 		case STORE:
3128 		case LOAD_FP:
3129 		case STORE_FP:
3130 			if (ra == 0)
3131 				goto unknown_opcode;
3132 		}
3133 	}
3134 
3135 #ifdef CONFIG_VSX
3136 	if ((GETTYPE(op->type) == LOAD_VSX ||
3137 	     GETTYPE(op->type) == STORE_VSX) &&
3138 	    !cpu_has_feature(CPU_FTR_VSX)) {
3139 		return -1;
3140 	}
3141 #endif /* CONFIG_VSX */
3142 
3143 	return 0;
3144 
3145  unknown_opcode:
3146 	op->type = UNKNOWN;
3147 	return 0;
3148 
3149  logical_done:
3150 	if (word & 1)
3151 		set_cr0(regs, op);
3152  logical_done_nocc:
3153 	op->reg = ra;
3154 	op->type |= SETREG;
3155 	return 1;
3156 
3157  arith_done:
3158 	if (word & 1)
3159 		set_cr0(regs, op);
3160  compute_done:
3161 	op->reg = rd;
3162 	op->type |= SETREG;
3163 	return 1;
3164 
3165  priv:
3166 	op->type = INTERRUPT | 0x700;
3167 	op->val = SRR1_PROGPRIV;
3168 	return 0;
3169 
3170  trap:
3171 	op->type = INTERRUPT | 0x700;
3172 	op->val = SRR1_PROGTRAP;
3173 	return 0;
3174 }
3175 EXPORT_SYMBOL_GPL(analyse_instr);
3176 NOKPROBE_SYMBOL(analyse_instr);
3177 
3178 /*
3179  * For PPC32 we always use stwu with r1 to change the stack pointer.
3180  * This emulated store may therefore corrupt the exception frame, so an
3181  * exception frame trampoline is provided, pushed below the kprobed
3182  * function's stack.  Here we only update gpr[1] and do not emulate the
3183  * store itself; the exception return path checks this flag and performs
3184  * the real store safely there.
3185  */
3186 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
3187 {
3188 	/*
3189 	 * Check if the flag is already set, since that means we would
3190 	 * lose the previous value.
3191 	 */
3192 	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
3193 	set_thread_flag(TIF_EMULATE_STACK_STORE);
3194 	return 0;
3195 }
3196 
3197 static nokprobe_inline void do_signext(unsigned long *valp, int size)
3198 {
3199 	switch (size) {
3200 	case 2:
3201 		*valp = (signed short) *valp;
3202 		break;
3203 	case 4:
3204 		*valp = (signed int) *valp;
3205 		break;
3206 	}
3207 }
3208 
3209 static nokprobe_inline void do_byterev(unsigned long *valp, int size)
3210 {
3211 	switch (size) {
3212 	case 2:
3213 		*valp = byterev_2(*valp);
3214 		break;
3215 	case 4:
3216 		*valp = byterev_4(*valp);
3217 		break;
3218 #ifdef __powerpc64__
3219 	case 8:
3220 		*valp = byterev_8(*valp);
3221 		break;
3222 #endif
3223 	}
3224 }
3225 
3226 /*
3227  * Emulate an instruction that can be executed just by updating
3228  * fields in *regs.
3229  */
3230 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
3231 {
3232 	unsigned long next_pc;
3233 
3234 	next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
3235 	switch (GETTYPE(op->type)) {
3236 	case COMPUTE:
3237 		if (op->type & SETREG)
3238 			regs->gpr[op->reg] = op->val;
3239 		if (op->type & SETCC)
3240 			regs->ccr = op->ccval;
3241 		if (op->type & SETXER)
3242 			regs->xer = op->xerval;
3243 		break;
3244 
3245 	case BRANCH:
3246 		if (op->type & SETLK)
3247 			regs->link = next_pc;
3248 		if (op->type & BRTAKEN)
3249 			next_pc = op->val;
3250 		if (op->type & DECCTR)
3251 			--regs->ctr;
3252 		break;
3253 
3254 	case BARRIER:
3255 		switch (op->type & BARRIER_MASK) {
3256 		case BARRIER_SYNC:
3257 			mb();
3258 			break;
3259 		case BARRIER_ISYNC:
3260 			isync();
3261 			break;
3262 		case BARRIER_EIEIO:
3263 			eieio();
3264 			break;
3265 #ifdef CONFIG_PPC64
3266 		case BARRIER_LWSYNC:
3267 			asm volatile("lwsync" : : : "memory");
3268 			break;
3269 		case BARRIER_PTESYNC:
3270 			asm volatile("ptesync" : : : "memory");
3271 			break;
3272 #endif
3273 		}
3274 		break;
3275 
3276 	case MFSPR:
3277 		switch (op->spr) {
3278 		case SPRN_XER:
3279 			regs->gpr[op->reg] = regs->xer & 0xffffffffUL;
3280 			break;
3281 		case SPRN_LR:
3282 			regs->gpr[op->reg] = regs->link;
3283 			break;
3284 		case SPRN_CTR:
3285 			regs->gpr[op->reg] = regs->ctr;
3286 			break;
3287 		default:
3288 			WARN_ON_ONCE(1);
3289 		}
3290 		break;
3291 
3292 	case MTSPR:
3293 		switch (op->spr) {
3294 		case SPRN_XER:
3295 			regs->xer = op->val & 0xffffffffUL;
3296 			break;
3297 		case SPRN_LR:
3298 			regs->link = op->val;
3299 			break;
3300 		case SPRN_CTR:
3301 			regs->ctr = op->val;
3302 			break;
3303 		default:
3304 			WARN_ON_ONCE(1);
3305 		}
3306 		break;
3307 
3308 	default:
3309 		WARN_ON_ONCE(1);
3310 	}
3311 	regs_set_return_ip(regs, next_pc);
3312 }
3313 NOKPROBE_SYMBOL(emulate_update_regs);
3314 
3315 /*
3316  * Emulate a previously-analysed load or store instruction.
3317  * Return values are:
3318  * 0 = instruction emulated successfully
3319  * -EFAULT = address out of range or access faulted (regs->dar
3320  *	     contains the faulting address)
3321  * -EACCES = misaligned access, instruction requires alignment
3322  * -EINVAL = unknown operation in *op
3323  */
3324 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3325 {
3326 	int err, size, type;
3327 	int i, rd, nb;
3328 	unsigned int cr;
3329 	unsigned long val;
3330 	unsigned long ea;
3331 	bool cross_endian;
3332 
3333 	err = 0;
3334 	size = GETSIZE(op->type);
3335 	type = GETTYPE(op->type);
3336 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3337 	ea = truncate_if_32bit(regs->msr, op->ea);
3338 
3339 	switch (type) {
3340 	case LARX:
3341 		if (ea & (size - 1))
3342 			return -EACCES;		/* can't handle misaligned */
3343 		if (!address_ok(regs, ea, size))
3344 			return -EFAULT;
3345 		err = 0;
3346 		val = 0;
3347 		switch (size) {
3348 #ifdef CONFIG_PPC_HAS_LBARX_LHARX
3349 		case 1:
3350 			__get_user_asmx(val, ea, err, "lbarx");
3351 			break;
3352 		case 2:
3353 			__get_user_asmx(val, ea, err, "lharx");
3354 			break;
3355 #endif
3356 		case 4:
3357 			__get_user_asmx(val, ea, err, "lwarx");
3358 			break;
3359 #ifdef __powerpc64__
3360 		case 8:
3361 			__get_user_asmx(val, ea, err, "ldarx");
3362 			break;
3363 		case 16:
3364 			err = do_lqarx(ea, &regs->gpr[op->reg]);
3365 			break;
3366 #endif
3367 		default:
3368 			return -EINVAL;
3369 		}
3370 		if (err) {
3371 			regs->dar = ea;
3372 			break;
3373 		}
3374 		if (size < 16)
3375 			regs->gpr[op->reg] = val;
3376 		break;
3377 
3378 	case STCX:
3379 		if (ea & (size - 1))
3380 			return -EACCES;		/* can't handle misaligned */
3381 		if (!address_ok(regs, ea, size))
3382 			return -EFAULT;
3383 		err = 0;
3384 		switch (size) {
3385 #ifdef __powerpc64__
3386 		case 1:
3387 			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3388 			break;
3389 		case 2:
3390 			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3391 			break;
3392 #endif
3393 		case 4:
3394 			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3395 			break;
3396 #ifdef __powerpc64__
3397 		case 8:
3398 			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3399 			break;
3400 		case 16:
3401 			err = do_stqcx(ea, regs->gpr[op->reg],
3402 				       regs->gpr[op->reg + 1], &cr);
3403 			break;
3404 #endif
3405 		default:
3406 			return -EINVAL;
3407 		}
3408 		if (!err)
3409 			regs->ccr = (regs->ccr & 0x0fffffff) |
3410 				(cr & 0xe0000000) |
3411 				((regs->xer >> 3) & 0x10000000);
3412 		else
3413 			regs->dar = ea;
3414 		break;
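	/*
	 * CR0 above is assembled from the LT/GT/EQ bits returned in cr
	 * (mask 0xe0000000) plus SO copied from XER: XER[SO] is 0x80000000,
	 * and the >> 3 moves it to CR0's SO position, 0x10000000.
	 */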
3415 
3416 	case LOAD:
3417 #ifdef __powerpc64__
3418 		if (size == 16) {
3419 			err = emulate_lq(regs, ea, op->reg, cross_endian);
3420 			break;
3421 		}
3422 #endif
3423 		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3424 		if (!err) {
3425 			if (op->type & SIGNEXT)
3426 				do_signext(&regs->gpr[op->reg], size);
3427 			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3428 				do_byterev(&regs->gpr[op->reg], size);
3429 		}
3430 		break;
3431 
3432 #ifdef CONFIG_PPC_FPU
3433 	case LOAD_FP:
3434 		/*
3435 		 * If the instruction is in userspace, we can emulate it even
3436 		 * if the FP/VMX/VSX state is not live, because we have the state
3437 		 * stored in the thread_struct.  If the instruction is in
3438 		 * the kernel, we must not touch the state in the thread_struct.
3439 		 */
3440 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3441 			return 0;
3442 		err = do_fp_load(op, ea, regs, cross_endian);
3443 		break;
3444 #endif
3445 #ifdef CONFIG_ALTIVEC
3446 	case LOAD_VMX:
3447 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3448 			return 0;
3449 		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
3450 		break;
3451 #endif
3452 #ifdef CONFIG_VSX
3453 	case LOAD_VSX: {
3454 		unsigned long msrbit = MSR_VSX;
3455 
3456 		/*
3457 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3458 		 * when the target of the instruction is a vector register.
3459 		 */
3460 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3461 			msrbit = MSR_VEC;
3462 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3463 			return 0;
3464 		err = do_vsx_load(op, ea, regs, cross_endian);
3465 		break;
3466 	}
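	/*
	 * E.g. lxsd targets a VR (analyse_instr set op->reg to rd + 32 with
	 * VSX_CHECK_VEC), so the test above uses MSR_VEC, while lxsdx
	 * targets a low VSR and is gated on MSR_VSX.
	 */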
3467 #endif
3468 	case LOAD_MULTI:
3469 		if (!address_ok(regs, ea, size))
3470 			return -EFAULT;
3471 		rd = op->reg;
3472 		for (i = 0; i < size; i += 4) {
3473 			unsigned int v32 = 0;
3474 
3475 			nb = size - i;
3476 			if (nb > 4)
3477 				nb = 4;
3478 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
3479 			if (err)
3480 				break;
3481 			if (unlikely(cross_endian))
3482 				v32 = byterev_4(v32);
3483 			regs->gpr[rd] = v32;
3484 			ea += 4;
3485 			/* reg number wraps from 31 to 0 for lsw[ix] */
3486 			rd = (rd + 1) & 0x1f;
3487 		}
3488 		break;
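	/*
	 * Example: lswx with a byte count of 6 in XER runs this loop twice,
	 * copying 4 bytes and then 2 bytes into successive registers, with
	 * rd wrapping from r31 back to r0 if needed.
	 */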
3489 
3490 	case STORE:
3491 #ifdef __powerpc64__
3492 		if (size == 16) {
3493 			err = emulate_stq(regs, ea, op->reg, cross_endian);
3494 			break;
3495 		}
3496 #endif
3497 		if ((op->type & UPDATE) && size == sizeof(long) &&
3498 		    op->reg == 1 && op->update_reg == 1 &&
3499 		    !(regs->msr & MSR_PR) &&
3500 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
3501 			err = handle_stack_update(ea, regs);
3502 			break;
3503 		}
3504 		if (unlikely(cross_endian))
3505 			do_byterev(&op->val, size);
3506 		err = write_mem(op->val, ea, size, regs);
3507 		break;
3508 
3509 #ifdef CONFIG_PPC_FPU
3510 	case STORE_FP:
3511 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
3512 			return 0;
3513 		err = do_fp_store(op, ea, regs, cross_endian);
3514 		break;
3515 #endif
3516 #ifdef CONFIG_ALTIVEC
3517 	case STORE_VMX:
3518 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
3519 			return 0;
3520 		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
3521 		break;
3522 #endif
3523 #ifdef CONFIG_VSX
3524 	case STORE_VSX: {
3525 		unsigned long msrbit = MSR_VSX;
3526 
3527 		/*
3528 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
3529 		 * when the target of the instruction is a vector register.
3530 		 */
3531 		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
3532 			msrbit = MSR_VEC;
3533 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
3534 			return 0;
3535 		err = do_vsx_store(op, ea, regs, cross_endian);
3536 		break;
3537 	}
3538 #endif
3539 	case STORE_MULTI:
3540 		if (!address_ok(regs, ea, size))
3541 			return -EFAULT;
3542 		rd = op->reg;
3543 		for (i = 0; i < size; i += 4) {
3544 			unsigned int v32 = regs->gpr[rd];
3545 
3546 			nb = size - i;
3547 			if (nb > 4)
3548 				nb = 4;
3549 			if (unlikely(cross_endian))
3550 				v32 = byterev_4(v32);
3551 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
3552 			if (err)
3553 				break;
3554 			ea += 4;
3555 			/* reg number wraps from 31 to 0 for stsw[ix] */
3556 			rd = (rd + 1) & 0x1f;
3557 		}
3558 		break;
3559 
3560 	default:
3561 		return -EINVAL;
3562 	}
3563 
3564 	if (err)
3565 		return err;
3566 
3567 	if (op->type & UPDATE)
3568 		regs->gpr[op->update_reg] = op->ea;
3569 
3570 	return 0;
3571 }
3572 NOKPROBE_SYMBOL(emulate_loadstore);
3573 
3574 /*
3575  * Emulate instructions that cause a transfer of control,
3576  * loads and stores, and a few other instructions.
3577  * Returns 1 if the step was emulated, 0 if not,
3578  * or -1 if the instruction is one that should not be stepped,
3579  * such as an rfid, or a mtmsrd that would clear MSR_RI.
3580  */
3581 int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
3582 {
3583 	struct instruction_op op;
3584 	int r, err, type;
3585 	unsigned long val;
3586 	unsigned long ea;
3587 
3588 	r = analyse_instr(&op, regs, instr);
3589 	if (r < 0)
3590 		return r;
3591 	if (r > 0) {
3592 		emulate_update_regs(regs, &op);
3593 		return 1;
3594 	}
3595 
3596 	err = 0;
3597 	type = GETTYPE(op.type);
3598 
3599 	if (OP_IS_LOAD_STORE(type)) {
3600 		err = emulate_loadstore(regs, &op);
3601 		if (err)
3602 			return 0;
3603 		goto instr_done;
3604 	}
3605 
3606 	switch (type) {
3607 	case CACHEOP:
3608 		ea = truncate_if_32bit(regs->msr, op.ea);
3609 		if (!address_ok(regs, ea, 8))
3610 			return 0;
3611 		switch (op.type & CACHEOP_MASK) {
3612 		case DCBST:
3613 			__cacheop_user_asmx(ea, err, "dcbst");
3614 			break;
3615 		case DCBF:
3616 			__cacheop_user_asmx(ea, err, "dcbf");
3617 			break;
3618 		case DCBTST:
3619 			if (op.reg == 0)
3620 				prefetchw((void *) ea);
3621 			break;
3622 		case DCBT:
3623 			if (op.reg == 0)
3624 				prefetch((void *) ea);
3625 			break;
3626 		case ICBI:
3627 			__cacheop_user_asmx(ea, err, "icbi");
3628 			break;
3629 		case DCBZ:
3630 			err = emulate_dcbz(ea, regs);
3631 			break;
3632 		}
3633 		if (err) {
3634 			regs->dar = ea;
3635 			return 0;
3636 		}
3637 		goto instr_done;
3638 
3639 	case MFMSR:
3640 		regs->gpr[op.reg] = regs->msr & MSR_MASK;
3641 		goto instr_done;
3642 
3643 	case MTMSR:
3644 		val = regs->gpr[op.reg];
3645 		if ((val & MSR_RI) == 0)
3646 			/* can't step mtmsr[d] that would clear MSR_RI */
3647 			return -1;
3648 		/* here op.val is the mask of bits to change */
3649 		regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
3650 		goto instr_done;
3651 
3652 	case SYSCALL:	/* sc */
3653 		/*
3654 		 * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
3655 		 * single step a system call instruction:
3656 		 *
3657 		 *   Successful completion for an instruction means that the
3658 		 *   instruction caused no other interrupt. Thus a Trace
3659 		 *   interrupt never occurs for a System Call or System Call
3660 		 *   Vectored instruction, or for a Trap instruction that
3661 		 *   traps.
3662 		 */
3663 		return -1;
3664 	case SYSCALL_VECTORED_0:	/* scv 0 */
3665 		return -1;
3666 	case RFI:
3667 		return -1;
3668 	}
3669 	return 0;
3670 
3671  instr_done:
3672 	regs_set_return_ip(regs,
3673 		truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
3674 	return 1;
3675 }
3676 NOKPROBE_SYMBOL(emulate_step);
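/*
 * Typical use (sketch only; assumes the caller has a pt_regs for the
 * trapping context):
 *
 *	ppc_inst_t insn = ppc_inst_read((u32 *)regs->nip);
 *
 *	if (emulate_step(regs, insn) == 1)
 *		return;		// emulated; regs->nip already advanced
 *	// 0 or -1: fall back to hardware single-stepping
 */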
3677