1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Single-step support. 4 * 5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM 6 */ 7 #include <linux/kernel.h> 8 #include <linux/kprobes.h> 9 #include <linux/ptrace.h> 10 #include <linux/prefetch.h> 11 #include <asm/sstep.h> 12 #include <asm/processor.h> 13 #include <linux/uaccess.h> 14 #include <asm/cpu_has_feature.h> 15 #include <asm/cputable.h> 16 #include <asm/disassemble.h> 17 18 extern char system_call_common[]; 19 extern char system_call_vectored_emulate[]; 20 21 #ifdef CONFIG_PPC64 22 /* Bits in SRR1 that are copied from MSR */ 23 #define MSR_MASK 0xffffffff87c0ffffUL 24 #else 25 #define MSR_MASK 0x87c0ffff 26 #endif 27 28 /* Bits in XER */ 29 #define XER_SO 0x80000000U 30 #define XER_OV 0x40000000U 31 #define XER_CA 0x20000000U 32 #define XER_OV32 0x00080000U 33 #define XER_CA32 0x00040000U 34 35 #ifdef CONFIG_VSX 36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe)) 37 #endif 38 39 #ifdef CONFIG_PPC_FPU 40 /* 41 * Functions in ldstfp.S 42 */ 43 extern void get_fpr(int rn, double *p); 44 extern void put_fpr(int rn, const double *p); 45 extern void get_vr(int rn, __vector128 *p); 46 extern void put_vr(int rn, __vector128 *p); 47 extern void load_vsrn(int vsr, const void *p); 48 extern void store_vsrn(int vsr, void *p); 49 extern void conv_sp_to_dp(const float *sp, double *dp); 50 extern void conv_dp_to_sp(const double *dp, float *sp); 51 #endif 52 53 #ifdef __powerpc64__ 54 /* 55 * Functions in quad.S 56 */ 57 extern int do_lq(unsigned long ea, unsigned long *regs); 58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1); 59 extern int do_lqarx(unsigned long ea, unsigned long *regs); 60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, 61 unsigned int *crp); 62 #endif 63 64 #ifdef __LITTLE_ENDIAN__ 65 #define IS_LE 1 66 #define IS_BE 0 67 #else 68 #define IS_LE 0 69 #define IS_BE 1 70 #endif 71 72 /* 73 * Emulate the truncation of 64 bit values in 32-bit mode. 74 */ 75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, 76 unsigned long val) 77 { 78 #ifdef __powerpc64__ 79 if ((msr & MSR_64BIT) == 0) 80 val &= 0xffffffffUL; 81 #endif 82 return val; 83 } 84 85 /* 86 * Determine whether a conditional branch instruction would branch. 87 */ 88 static nokprobe_inline int branch_taken(unsigned int instr, 89 const struct pt_regs *regs, 90 struct instruction_op *op) 91 { 92 unsigned int bo = (instr >> 21) & 0x1f; 93 unsigned int bi; 94 95 if ((bo & 4) == 0) { 96 /* decrement counter */ 97 op->type |= DECCTR; 98 if (((bo >> 1) & 1) ^ (regs->ctr == 1)) 99 return 0; 100 } 101 if ((bo & 0x10) == 0) { 102 /* check bit from CR */ 103 bi = (instr >> 16) & 0x1f; 104 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) 105 return 0; 106 } 107 return 1; 108 } 109 110 static nokprobe_inline long address_ok(struct pt_regs *regs, 111 unsigned long ea, int nb) 112 { 113 if (!user_mode(regs)) 114 return 1; 115 if (__access_ok(ea, nb)) 116 return 1; 117 if (__access_ok(ea, 1)) 118 /* Access overlaps the end of the user region */ 119 regs->dar = TASK_SIZE_MAX - 1; 120 else 121 regs->dar = ea; 122 return 0; 123 } 124 125 /* 126 * Calculate effective address for a D-form instruction 127 */ 128 static nokprobe_inline unsigned long dform_ea(unsigned int instr, 129 const struct pt_regs *regs) 130 { 131 int ra; 132 unsigned long ea; 133 134 ra = (instr >> 16) & 0x1f; 135 ea = (signed short) instr; /* sign-extend */ 136 if (ra) 137 ea += regs->gpr[ra]; 138 139 return ea; 140 } 141 142 #ifdef __powerpc64__ 143 /* 144 * Calculate effective address for a DS-form instruction 145 */ 146 static nokprobe_inline unsigned long dsform_ea(unsigned int instr, 147 const struct pt_regs *regs) 148 { 149 int ra; 150 unsigned long ea; 151 152 ra = (instr >> 16) & 0x1f; 153 ea = (signed short) (instr & ~3); /* sign-extend */ 154 if (ra) 155 ea += regs->gpr[ra]; 156 157 return ea; 158 } 159 160 /* 161 * Calculate effective address for a DQ-form instruction 162 */ 163 static nokprobe_inline unsigned long dqform_ea(unsigned int instr, 164 const struct pt_regs *regs) 165 { 166 int ra; 167 unsigned long ea; 168 169 ra = (instr >> 16) & 0x1f; 170 ea = (signed short) (instr & ~0xf); /* sign-extend */ 171 if (ra) 172 ea += regs->gpr[ra]; 173 174 return ea; 175 } 176 #endif /* __powerpc64 */ 177 178 /* 179 * Calculate effective address for an X-form instruction 180 */ 181 static nokprobe_inline unsigned long xform_ea(unsigned int instr, 182 const struct pt_regs *regs) 183 { 184 int ra, rb; 185 unsigned long ea; 186 187 ra = (instr >> 16) & 0x1f; 188 rb = (instr >> 11) & 0x1f; 189 ea = regs->gpr[rb]; 190 if (ra) 191 ea += regs->gpr[ra]; 192 193 return ea; 194 } 195 196 /* 197 * Calculate effective address for a MLS:D-form / 8LS:D-form 198 * prefixed instruction 199 */ 200 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, 201 unsigned int suffix, 202 const struct pt_regs *regs) 203 { 204 int ra, prefix_r; 205 unsigned int dd; 206 unsigned long ea, d0, d1, d; 207 208 prefix_r = GET_PREFIX_R(instr); 209 ra = GET_PREFIX_RA(suffix); 210 211 d0 = instr & 0x3ffff; 212 d1 = suffix & 0xffff; 213 d = (d0 << 16) | d1; 214 215 /* 216 * sign extend a 34 bit number 217 */ 218 dd = (unsigned int)(d >> 2); 219 ea = (signed int)dd; 220 ea = (ea << 2) | (d & 0x3); 221 222 if (!prefix_r && ra) 223 ea += regs->gpr[ra]; 224 else if (!prefix_r && !ra) 225 ; /* Leave ea as is */ 226 else if (prefix_r) 227 ea += regs->nip; 228 229 /* 230 * (prefix_r && ra) is an invalid form. Should already be 231 * checked for by caller! 232 */ 233 234 return ea; 235 } 236 237 /* 238 * Return the largest power of 2, not greater than sizeof(unsigned long), 239 * such that x is a multiple of it. 240 */ 241 static nokprobe_inline unsigned long max_align(unsigned long x) 242 { 243 x |= sizeof(unsigned long); 244 return x & -x; /* isolates rightmost bit */ 245 } 246 247 static nokprobe_inline unsigned long byterev_2(unsigned long x) 248 { 249 return ((x >> 8) & 0xff) | ((x & 0xff) << 8); 250 } 251 252 static nokprobe_inline unsigned long byterev_4(unsigned long x) 253 { 254 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) | 255 ((x & 0xff00) << 8) | ((x & 0xff) << 24); 256 } 257 258 #ifdef __powerpc64__ 259 static nokprobe_inline unsigned long byterev_8(unsigned long x) 260 { 261 return (byterev_4(x) << 32) | byterev_4(x >> 32); 262 } 263 #endif 264 265 static nokprobe_inline void do_byte_reverse(void *ptr, int nb) 266 { 267 switch (nb) { 268 case 2: 269 *(u16 *)ptr = byterev_2(*(u16 *)ptr); 270 break; 271 case 4: 272 *(u32 *)ptr = byterev_4(*(u32 *)ptr); 273 break; 274 #ifdef __powerpc64__ 275 case 8: 276 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr); 277 break; 278 case 16: { 279 unsigned long *up = (unsigned long *)ptr; 280 unsigned long tmp; 281 tmp = byterev_8(up[0]); 282 up[0] = byterev_8(up[1]); 283 up[1] = tmp; 284 break; 285 } 286 case 32: { 287 unsigned long *up = (unsigned long *)ptr; 288 unsigned long tmp; 289 290 tmp = byterev_8(up[0]); 291 up[0] = byterev_8(up[3]); 292 up[3] = tmp; 293 tmp = byterev_8(up[2]); 294 up[2] = byterev_8(up[1]); 295 up[1] = tmp; 296 break; 297 } 298 299 #endif 300 default: 301 WARN_ON_ONCE(1); 302 } 303 } 304 305 static __always_inline int 306 __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 307 { 308 unsigned long x = 0; 309 310 switch (nb) { 311 case 1: 312 unsafe_get_user(x, (unsigned char __user *)ea, Efault); 313 break; 314 case 2: 315 unsafe_get_user(x, (unsigned short __user *)ea, Efault); 316 break; 317 case 4: 318 unsafe_get_user(x, (unsigned int __user *)ea, Efault); 319 break; 320 #ifdef __powerpc64__ 321 case 8: 322 unsafe_get_user(x, (unsigned long __user *)ea, Efault); 323 break; 324 #endif 325 } 326 *dest = x; 327 return 0; 328 329 Efault: 330 regs->dar = ea; 331 return -EFAULT; 332 } 333 334 static nokprobe_inline int 335 read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 336 { 337 int err; 338 339 if (is_kernel_addr(ea)) 340 return __read_mem_aligned(dest, ea, nb, regs); 341 342 if (user_read_access_begin((void __user *)ea, nb)) { 343 err = __read_mem_aligned(dest, ea, nb, regs); 344 user_read_access_end(); 345 } else { 346 err = -EFAULT; 347 regs->dar = ea; 348 } 349 350 return err; 351 } 352 353 /* 354 * Copy from userspace to a buffer, using the largest possible 355 * aligned accesses, up to sizeof(long). 356 */ 357 static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 358 { 359 int c; 360 361 for (; nb > 0; nb -= c) { 362 c = max_align(ea); 363 if (c > nb) 364 c = max_align(nb); 365 switch (c) { 366 case 1: 367 unsafe_get_user(*dest, (u8 __user *)ea, Efault); 368 break; 369 case 2: 370 unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault); 371 break; 372 case 4: 373 unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault); 374 break; 375 #ifdef __powerpc64__ 376 case 8: 377 unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault); 378 break; 379 #endif 380 } 381 dest += c; 382 ea += c; 383 } 384 return 0; 385 386 Efault: 387 regs->dar = ea; 388 return -EFAULT; 389 } 390 391 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 392 { 393 int err; 394 395 if (is_kernel_addr(ea)) 396 return __copy_mem_in(dest, ea, nb, regs); 397 398 if (user_read_access_begin((void __user *)ea, nb)) { 399 err = __copy_mem_in(dest, ea, nb, regs); 400 user_read_access_end(); 401 } else { 402 err = -EFAULT; 403 regs->dar = ea; 404 } 405 406 return err; 407 } 408 409 static nokprobe_inline int read_mem_unaligned(unsigned long *dest, 410 unsigned long ea, int nb, 411 struct pt_regs *regs) 412 { 413 union { 414 unsigned long ul; 415 u8 b[sizeof(unsigned long)]; 416 } u; 417 int i; 418 int err; 419 420 u.ul = 0; 421 i = IS_BE ? sizeof(unsigned long) - nb : 0; 422 err = copy_mem_in(&u.b[i], ea, nb, regs); 423 if (!err) 424 *dest = u.ul; 425 return err; 426 } 427 428 /* 429 * Read memory at address ea for nb bytes, return 0 for success 430 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 431 * If nb < sizeof(long), the result is right-justified on BE systems. 432 */ 433 static int read_mem(unsigned long *dest, unsigned long ea, int nb, 434 struct pt_regs *regs) 435 { 436 if (!address_ok(regs, ea, nb)) 437 return -EFAULT; 438 if ((ea & (nb - 1)) == 0) 439 return read_mem_aligned(dest, ea, nb, regs); 440 return read_mem_unaligned(dest, ea, nb, regs); 441 } 442 NOKPROBE_SYMBOL(read_mem); 443 444 static __always_inline int 445 __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 446 { 447 switch (nb) { 448 case 1: 449 unsafe_put_user(val, (unsigned char __user *)ea, Efault); 450 break; 451 case 2: 452 unsafe_put_user(val, (unsigned short __user *)ea, Efault); 453 break; 454 case 4: 455 unsafe_put_user(val, (unsigned int __user *)ea, Efault); 456 break; 457 #ifdef __powerpc64__ 458 case 8: 459 unsafe_put_user(val, (unsigned long __user *)ea, Efault); 460 break; 461 #endif 462 } 463 return 0; 464 465 Efault: 466 regs->dar = ea; 467 return -EFAULT; 468 } 469 470 static nokprobe_inline int 471 write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 472 { 473 int err; 474 475 if (is_kernel_addr(ea)) 476 return __write_mem_aligned(val, ea, nb, regs); 477 478 if (user_write_access_begin((void __user *)ea, nb)) { 479 err = __write_mem_aligned(val, ea, nb, regs); 480 user_write_access_end(); 481 } else { 482 err = -EFAULT; 483 regs->dar = ea; 484 } 485 486 return err; 487 } 488 489 /* 490 * Copy from a buffer to userspace, using the largest possible 491 * aligned accesses, up to sizeof(long). 492 */ 493 static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 494 { 495 int c; 496 497 for (; nb > 0; nb -= c) { 498 c = max_align(ea); 499 if (c > nb) 500 c = max_align(nb); 501 switch (c) { 502 case 1: 503 unsafe_put_user(*dest, (u8 __user *)ea, Efault); 504 break; 505 case 2: 506 unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault); 507 break; 508 case 4: 509 unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault); 510 break; 511 #ifdef __powerpc64__ 512 case 8: 513 unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault); 514 break; 515 #endif 516 } 517 dest += c; 518 ea += c; 519 } 520 return 0; 521 522 Efault: 523 regs->dar = ea; 524 return -EFAULT; 525 } 526 527 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 528 { 529 int err; 530 531 if (is_kernel_addr(ea)) 532 return __copy_mem_out(dest, ea, nb, regs); 533 534 if (user_write_access_begin((void __user *)ea, nb)) { 535 err = __copy_mem_out(dest, ea, nb, regs); 536 user_write_access_end(); 537 } else { 538 err = -EFAULT; 539 regs->dar = ea; 540 } 541 542 return err; 543 } 544 545 static nokprobe_inline int write_mem_unaligned(unsigned long val, 546 unsigned long ea, int nb, 547 struct pt_regs *regs) 548 { 549 union { 550 unsigned long ul; 551 u8 b[sizeof(unsigned long)]; 552 } u; 553 int i; 554 555 u.ul = val; 556 i = IS_BE ? sizeof(unsigned long) - nb : 0; 557 return copy_mem_out(&u.b[i], ea, nb, regs); 558 } 559 560 /* 561 * Write memory at address ea for nb bytes, return 0 for success 562 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 563 */ 564 static int write_mem(unsigned long val, unsigned long ea, int nb, 565 struct pt_regs *regs) 566 { 567 if (!address_ok(regs, ea, nb)) 568 return -EFAULT; 569 if ((ea & (nb - 1)) == 0) 570 return write_mem_aligned(val, ea, nb, regs); 571 return write_mem_unaligned(val, ea, nb, regs); 572 } 573 NOKPROBE_SYMBOL(write_mem); 574 575 #ifdef CONFIG_PPC_FPU 576 /* 577 * These access either the real FP register or the image in the 578 * thread_struct, depending on regs->msr & MSR_FP. 579 */ 580 static int do_fp_load(struct instruction_op *op, unsigned long ea, 581 struct pt_regs *regs, bool cross_endian) 582 { 583 int err, rn, nb; 584 union { 585 int i; 586 unsigned int u; 587 float f; 588 double d[2]; 589 unsigned long l[2]; 590 u8 b[2 * sizeof(double)]; 591 } u; 592 593 nb = GETSIZE(op->type); 594 if (!address_ok(regs, ea, nb)) 595 return -EFAULT; 596 rn = op->reg; 597 err = copy_mem_in(u.b, ea, nb, regs); 598 if (err) 599 return err; 600 if (unlikely(cross_endian)) { 601 do_byte_reverse(u.b, min(nb, 8)); 602 if (nb == 16) 603 do_byte_reverse(&u.b[8], 8); 604 } 605 preempt_disable(); 606 if (nb == 4) { 607 if (op->type & FPCONV) 608 conv_sp_to_dp(&u.f, &u.d[0]); 609 else if (op->type & SIGNEXT) 610 u.l[0] = u.i; 611 else 612 u.l[0] = u.u; 613 } 614 if (regs->msr & MSR_FP) 615 put_fpr(rn, &u.d[0]); 616 else 617 current->thread.TS_FPR(rn) = u.l[0]; 618 if (nb == 16) { 619 /* lfdp */ 620 rn |= 1; 621 if (regs->msr & MSR_FP) 622 put_fpr(rn, &u.d[1]); 623 else 624 current->thread.TS_FPR(rn) = u.l[1]; 625 } 626 preempt_enable(); 627 return 0; 628 } 629 NOKPROBE_SYMBOL(do_fp_load); 630 631 static int do_fp_store(struct instruction_op *op, unsigned long ea, 632 struct pt_regs *regs, bool cross_endian) 633 { 634 int rn, nb; 635 union { 636 unsigned int u; 637 float f; 638 double d[2]; 639 unsigned long l[2]; 640 u8 b[2 * sizeof(double)]; 641 } u; 642 643 nb = GETSIZE(op->type); 644 if (!address_ok(regs, ea, nb)) 645 return -EFAULT; 646 rn = op->reg; 647 preempt_disable(); 648 if (regs->msr & MSR_FP) 649 get_fpr(rn, &u.d[0]); 650 else 651 u.l[0] = current->thread.TS_FPR(rn); 652 if (nb == 4) { 653 if (op->type & FPCONV) 654 conv_dp_to_sp(&u.d[0], &u.f); 655 else 656 u.u = u.l[0]; 657 } 658 if (nb == 16) { 659 rn |= 1; 660 if (regs->msr & MSR_FP) 661 get_fpr(rn, &u.d[1]); 662 else 663 u.l[1] = current->thread.TS_FPR(rn); 664 } 665 preempt_enable(); 666 if (unlikely(cross_endian)) { 667 do_byte_reverse(u.b, min(nb, 8)); 668 if (nb == 16) 669 do_byte_reverse(&u.b[8], 8); 670 } 671 return copy_mem_out(u.b, ea, nb, regs); 672 } 673 NOKPROBE_SYMBOL(do_fp_store); 674 #endif 675 676 #ifdef CONFIG_ALTIVEC 677 /* For Altivec/VMX, no need to worry about alignment */ 678 static nokprobe_inline int do_vec_load(int rn, unsigned long ea, 679 int size, struct pt_regs *regs, 680 bool cross_endian) 681 { 682 int err; 683 union { 684 __vector128 v; 685 u8 b[sizeof(__vector128)]; 686 } u = {}; 687 688 if (!address_ok(regs, ea & ~0xfUL, 16)) 689 return -EFAULT; 690 /* align to multiple of size */ 691 ea &= ~(size - 1); 692 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs); 693 if (err) 694 return err; 695 if (unlikely(cross_endian)) 696 do_byte_reverse(&u.b[ea & 0xf], size); 697 preempt_disable(); 698 if (regs->msr & MSR_VEC) 699 put_vr(rn, &u.v); 700 else 701 current->thread.vr_state.vr[rn] = u.v; 702 preempt_enable(); 703 return 0; 704 } 705 706 static nokprobe_inline int do_vec_store(int rn, unsigned long ea, 707 int size, struct pt_regs *regs, 708 bool cross_endian) 709 { 710 union { 711 __vector128 v; 712 u8 b[sizeof(__vector128)]; 713 } u; 714 715 if (!address_ok(regs, ea & ~0xfUL, 16)) 716 return -EFAULT; 717 /* align to multiple of size */ 718 ea &= ~(size - 1); 719 720 preempt_disable(); 721 if (regs->msr & MSR_VEC) 722 get_vr(rn, &u.v); 723 else 724 u.v = current->thread.vr_state.vr[rn]; 725 preempt_enable(); 726 if (unlikely(cross_endian)) 727 do_byte_reverse(&u.b[ea & 0xf], size); 728 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); 729 } 730 #endif /* CONFIG_ALTIVEC */ 731 732 #ifdef __powerpc64__ 733 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea, 734 int reg, bool cross_endian) 735 { 736 int err; 737 738 if (!address_ok(regs, ea, 16)) 739 return -EFAULT; 740 /* if aligned, should be atomic */ 741 if ((ea & 0xf) == 0) { 742 err = do_lq(ea, ®s->gpr[reg]); 743 } else { 744 err = read_mem(®s->gpr[reg + IS_LE], ea, 8, regs); 745 if (!err) 746 err = read_mem(®s->gpr[reg + IS_BE], ea + 8, 8, regs); 747 } 748 if (!err && unlikely(cross_endian)) 749 do_byte_reverse(®s->gpr[reg], 16); 750 return err; 751 } 752 753 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, 754 int reg, bool cross_endian) 755 { 756 int err; 757 unsigned long vals[2]; 758 759 if (!address_ok(regs, ea, 16)) 760 return -EFAULT; 761 vals[0] = regs->gpr[reg]; 762 vals[1] = regs->gpr[reg + 1]; 763 if (unlikely(cross_endian)) 764 do_byte_reverse(vals, 16); 765 766 /* if aligned, should be atomic */ 767 if ((ea & 0xf) == 0) 768 return do_stq(ea, vals[0], vals[1]); 769 770 err = write_mem(vals[IS_LE], ea, 8, regs); 771 if (!err) 772 err = write_mem(vals[IS_BE], ea + 8, 8, regs); 773 return err; 774 } 775 #endif /* __powerpc64 */ 776 777 #ifdef CONFIG_VSX 778 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, 779 const void *mem, bool rev) 780 { 781 int size, read_size; 782 int i, j; 783 const unsigned int *wp; 784 const unsigned short *hp; 785 const unsigned char *bp; 786 787 size = GETSIZE(op->type); 788 reg->d[0] = reg->d[1] = 0; 789 790 switch (op->element_size) { 791 case 32: 792 /* [p]lxvp[x] */ 793 case 16: 794 /* whole vector; lxv[x] or lxvl[l] */ 795 if (size == 0) 796 break; 797 memcpy(reg, mem, size); 798 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 799 rev = !rev; 800 if (rev) 801 do_byte_reverse(reg, size); 802 break; 803 case 8: 804 /* scalar loads, lxvd2x, lxvdsx */ 805 read_size = (size >= 8) ? 8 : size; 806 i = IS_LE ? 8 : 8 - read_size; 807 memcpy(®->b[i], mem, read_size); 808 if (rev) 809 do_byte_reverse(®->b[i], 8); 810 if (size < 8) { 811 if (op->type & SIGNEXT) { 812 /* size == 4 is the only case here */ 813 reg->d[IS_LE] = (signed int) reg->d[IS_LE]; 814 } else if (op->vsx_flags & VSX_FPCONV) { 815 preempt_disable(); 816 conv_sp_to_dp(®->fp[1 + IS_LE], 817 ®->dp[IS_LE]); 818 preempt_enable(); 819 } 820 } else { 821 if (size == 16) { 822 unsigned long v = *(unsigned long *)(mem + 8); 823 reg->d[IS_BE] = !rev ? v : byterev_8(v); 824 } else if (op->vsx_flags & VSX_SPLAT) 825 reg->d[IS_BE] = reg->d[IS_LE]; 826 } 827 break; 828 case 4: 829 /* lxvw4x, lxvwsx */ 830 wp = mem; 831 for (j = 0; j < size / 4; ++j) { 832 i = IS_LE ? 3 - j : j; 833 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++); 834 } 835 if (op->vsx_flags & VSX_SPLAT) { 836 u32 val = reg->w[IS_LE ? 3 : 0]; 837 for (; j < 4; ++j) { 838 i = IS_LE ? 3 - j : j; 839 reg->w[i] = val; 840 } 841 } 842 break; 843 case 2: 844 /* lxvh8x */ 845 hp = mem; 846 for (j = 0; j < size / 2; ++j) { 847 i = IS_LE ? 7 - j : j; 848 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); 849 } 850 break; 851 case 1: 852 /* lxvb16x */ 853 bp = mem; 854 for (j = 0; j < size; ++j) { 855 i = IS_LE ? 15 - j : j; 856 reg->b[i] = *bp++; 857 } 858 break; 859 } 860 } 861 EXPORT_SYMBOL_GPL(emulate_vsx_load); 862 NOKPROBE_SYMBOL(emulate_vsx_load); 863 864 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, 865 void *mem, bool rev) 866 { 867 int size, write_size; 868 int i, j; 869 union vsx_reg buf; 870 unsigned int *wp; 871 unsigned short *hp; 872 unsigned char *bp; 873 874 size = GETSIZE(op->type); 875 876 switch (op->element_size) { 877 case 32: 878 /* [p]stxvp[x] */ 879 if (size == 0) 880 break; 881 if (rev) { 882 /* reverse 32 bytes */ 883 union vsx_reg buf32[2]; 884 buf32[0].d[0] = byterev_8(reg[1].d[1]); 885 buf32[0].d[1] = byterev_8(reg[1].d[0]); 886 buf32[1].d[0] = byterev_8(reg[0].d[1]); 887 buf32[1].d[1] = byterev_8(reg[0].d[0]); 888 memcpy(mem, buf32, size); 889 } else { 890 memcpy(mem, reg, size); 891 } 892 break; 893 case 16: 894 /* stxv, stxvx, stxvl, stxvll */ 895 if (size == 0) 896 break; 897 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 898 rev = !rev; 899 if (rev) { 900 /* reverse 16 bytes */ 901 buf.d[0] = byterev_8(reg->d[1]); 902 buf.d[1] = byterev_8(reg->d[0]); 903 reg = &buf; 904 } 905 memcpy(mem, reg, size); 906 break; 907 case 8: 908 /* scalar stores, stxvd2x */ 909 write_size = (size >= 8) ? 8 : size; 910 i = IS_LE ? 8 : 8 - write_size; 911 if (size < 8 && op->vsx_flags & VSX_FPCONV) { 912 buf.d[0] = buf.d[1] = 0; 913 preempt_disable(); 914 conv_dp_to_sp(®->dp[IS_LE], &buf.fp[1 + IS_LE]); 915 preempt_enable(); 916 reg = &buf; 917 } 918 memcpy(mem, ®->b[i], write_size); 919 if (size == 16) 920 memcpy(mem + 8, ®->d[IS_BE], 8); 921 if (unlikely(rev)) { 922 do_byte_reverse(mem, write_size); 923 if (size == 16) 924 do_byte_reverse(mem + 8, 8); 925 } 926 break; 927 case 4: 928 /* stxvw4x */ 929 wp = mem; 930 for (j = 0; j < size / 4; ++j) { 931 i = IS_LE ? 3 - j : j; 932 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]); 933 } 934 break; 935 case 2: 936 /* stxvh8x */ 937 hp = mem; 938 for (j = 0; j < size / 2; ++j) { 939 i = IS_LE ? 7 - j : j; 940 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]); 941 } 942 break; 943 case 1: 944 /* stvxb16x */ 945 bp = mem; 946 for (j = 0; j < size; ++j) { 947 i = IS_LE ? 15 - j : j; 948 *bp++ = reg->b[i]; 949 } 950 break; 951 } 952 } 953 EXPORT_SYMBOL_GPL(emulate_vsx_store); 954 NOKPROBE_SYMBOL(emulate_vsx_store); 955 956 static nokprobe_inline int do_vsx_load(struct instruction_op *op, 957 unsigned long ea, struct pt_regs *regs, 958 bool cross_endian) 959 { 960 int reg = op->reg; 961 int i, j, nr_vsx_regs; 962 u8 mem[32]; 963 union vsx_reg buf[2]; 964 int size = GETSIZE(op->type); 965 966 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) 967 return -EFAULT; 968 969 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 970 emulate_vsx_load(op, buf, mem, cross_endian); 971 preempt_disable(); 972 if (reg < 32) { 973 /* FP regs + extensions */ 974 if (regs->msr & MSR_FP) { 975 for (i = 0; i < nr_vsx_regs; i++) { 976 j = IS_LE ? nr_vsx_regs - i - 1 : i; 977 load_vsrn(reg + i, &buf[j].v); 978 } 979 } else { 980 for (i = 0; i < nr_vsx_regs; i++) { 981 j = IS_LE ? nr_vsx_regs - i - 1 : i; 982 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0]; 983 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1]; 984 } 985 } 986 } else { 987 if (regs->msr & MSR_VEC) { 988 for (i = 0; i < nr_vsx_regs; i++) { 989 j = IS_LE ? nr_vsx_regs - i - 1 : i; 990 load_vsrn(reg + i, &buf[j].v); 991 } 992 } else { 993 for (i = 0; i < nr_vsx_regs; i++) { 994 j = IS_LE ? nr_vsx_regs - i - 1 : i; 995 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v; 996 } 997 } 998 } 999 preempt_enable(); 1000 return 0; 1001 } 1002 1003 static nokprobe_inline int do_vsx_store(struct instruction_op *op, 1004 unsigned long ea, struct pt_regs *regs, 1005 bool cross_endian) 1006 { 1007 int reg = op->reg; 1008 int i, j, nr_vsx_regs; 1009 u8 mem[32]; 1010 union vsx_reg buf[2]; 1011 int size = GETSIZE(op->type); 1012 1013 if (!address_ok(regs, ea, size)) 1014 return -EFAULT; 1015 1016 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 1017 preempt_disable(); 1018 if (reg < 32) { 1019 /* FP regs + extensions */ 1020 if (regs->msr & MSR_FP) { 1021 for (i = 0; i < nr_vsx_regs; i++) { 1022 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1023 store_vsrn(reg + i, &buf[j].v); 1024 } 1025 } else { 1026 for (i = 0; i < nr_vsx_regs; i++) { 1027 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1028 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0]; 1029 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1]; 1030 } 1031 } 1032 } else { 1033 if (regs->msr & MSR_VEC) { 1034 for (i = 0; i < nr_vsx_regs; i++) { 1035 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1036 store_vsrn(reg + i, &buf[j].v); 1037 } 1038 } else { 1039 for (i = 0; i < nr_vsx_regs; i++) { 1040 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1041 buf[j].v = current->thread.vr_state.vr[reg - 32 + i]; 1042 } 1043 } 1044 } 1045 preempt_enable(); 1046 emulate_vsx_store(op, buf, mem, cross_endian); 1047 return copy_mem_out(mem, ea, size, regs); 1048 } 1049 #endif /* CONFIG_VSX */ 1050 1051 static int __emulate_dcbz(unsigned long ea) 1052 { 1053 unsigned long i; 1054 unsigned long size = l1_dcache_bytes(); 1055 1056 for (i = 0; i < size; i += sizeof(long)) 1057 unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault); 1058 1059 return 0; 1060 1061 Efault: 1062 return -EFAULT; 1063 } 1064 1065 int emulate_dcbz(unsigned long ea, struct pt_regs *regs) 1066 { 1067 int err; 1068 unsigned long size; 1069 1070 #ifdef __powerpc64__ 1071 size = ppc64_caches.l1d.block_size; 1072 if (!(regs->msr & MSR_64BIT)) 1073 ea &= 0xffffffffUL; 1074 #else 1075 size = L1_CACHE_BYTES; 1076 #endif 1077 ea &= ~(size - 1); 1078 if (!address_ok(regs, ea, size)) 1079 return -EFAULT; 1080 1081 if (is_kernel_addr(ea)) { 1082 err = __emulate_dcbz(ea); 1083 } else if (user_write_access_begin((void __user *)ea, size)) { 1084 err = __emulate_dcbz(ea); 1085 user_write_access_end(); 1086 } else { 1087 err = -EFAULT; 1088 } 1089 1090 if (err) 1091 regs->dar = ea; 1092 1093 1094 return err; 1095 } 1096 NOKPROBE_SYMBOL(emulate_dcbz); 1097 1098 #define __put_user_asmx(x, addr, err, op, cr) \ 1099 __asm__ __volatile__( \ 1100 "1: " op " %2,0,%3\n" \ 1101 " mfcr %1\n" \ 1102 "2:\n" \ 1103 ".section .fixup,\"ax\"\n" \ 1104 "3: li %0,%4\n" \ 1105 " b 2b\n" \ 1106 ".previous\n" \ 1107 EX_TABLE(1b, 3b) \ 1108 : "=r" (err), "=r" (cr) \ 1109 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 1110 1111 #define __get_user_asmx(x, addr, err, op) \ 1112 __asm__ __volatile__( \ 1113 "1: "op" %1,0,%2\n" \ 1114 "2:\n" \ 1115 ".section .fixup,\"ax\"\n" \ 1116 "3: li %0,%3\n" \ 1117 " b 2b\n" \ 1118 ".previous\n" \ 1119 EX_TABLE(1b, 3b) \ 1120 : "=r" (err), "=r" (x) \ 1121 : "r" (addr), "i" (-EFAULT), "0" (err)) 1122 1123 #define __cacheop_user_asmx(addr, err, op) \ 1124 __asm__ __volatile__( \ 1125 "1: "op" 0,%1\n" \ 1126 "2:\n" \ 1127 ".section .fixup,\"ax\"\n" \ 1128 "3: li %0,%3\n" \ 1129 " b 2b\n" \ 1130 ".previous\n" \ 1131 EX_TABLE(1b, 3b) \ 1132 : "=r" (err) \ 1133 : "r" (addr), "i" (-EFAULT), "0" (err)) 1134 1135 static nokprobe_inline void set_cr0(const struct pt_regs *regs, 1136 struct instruction_op *op) 1137 { 1138 long val = op->val; 1139 1140 op->type |= SETCC; 1141 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); 1142 #ifdef __powerpc64__ 1143 if (!(regs->msr & MSR_64BIT)) 1144 val = (int) val; 1145 #endif 1146 if (val < 0) 1147 op->ccval |= 0x80000000; 1148 else if (val > 0) 1149 op->ccval |= 0x40000000; 1150 else 1151 op->ccval |= 0x20000000; 1152 } 1153 1154 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val) 1155 { 1156 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 1157 if (val) 1158 op->xerval |= XER_CA32; 1159 else 1160 op->xerval &= ~XER_CA32; 1161 } 1162 } 1163 1164 static nokprobe_inline void add_with_carry(const struct pt_regs *regs, 1165 struct instruction_op *op, int rd, 1166 unsigned long val1, unsigned long val2, 1167 unsigned long carry_in) 1168 { 1169 unsigned long val = val1 + val2; 1170 1171 if (carry_in) 1172 ++val; 1173 op->type = COMPUTE + SETREG + SETXER; 1174 op->reg = rd; 1175 op->val = val; 1176 #ifdef __powerpc64__ 1177 if (!(regs->msr & MSR_64BIT)) { 1178 val = (unsigned int) val; 1179 val1 = (unsigned int) val1; 1180 } 1181 #endif 1182 op->xerval = regs->xer; 1183 if (val < val1 || (carry_in && val == val1)) 1184 op->xerval |= XER_CA; 1185 else 1186 op->xerval &= ~XER_CA; 1187 1188 set_ca32(op, (unsigned int)val < (unsigned int)val1 || 1189 (carry_in && (unsigned int)val == (unsigned int)val1)); 1190 } 1191 1192 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, 1193 struct instruction_op *op, 1194 long v1, long v2, int crfld) 1195 { 1196 unsigned int crval, shift; 1197 1198 op->type = COMPUTE + SETCC; 1199 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1200 if (v1 < v2) 1201 crval |= 8; 1202 else if (v1 > v2) 1203 crval |= 4; 1204 else 1205 crval |= 2; 1206 shift = (7 - crfld) * 4; 1207 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1208 } 1209 1210 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, 1211 struct instruction_op *op, 1212 unsigned long v1, 1213 unsigned long v2, int crfld) 1214 { 1215 unsigned int crval, shift; 1216 1217 op->type = COMPUTE + SETCC; 1218 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1219 if (v1 < v2) 1220 crval |= 8; 1221 else if (v1 > v2) 1222 crval |= 4; 1223 else 1224 crval |= 2; 1225 shift = (7 - crfld) * 4; 1226 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1227 } 1228 1229 static nokprobe_inline void do_cmpb(const struct pt_regs *regs, 1230 struct instruction_op *op, 1231 unsigned long v1, unsigned long v2) 1232 { 1233 unsigned long long out_val, mask; 1234 int i; 1235 1236 out_val = 0; 1237 for (i = 0; i < 8; i++) { 1238 mask = 0xffUL << (i * 8); 1239 if ((v1 & mask) == (v2 & mask)) 1240 out_val |= mask; 1241 } 1242 op->val = out_val; 1243 } 1244 1245 /* 1246 * The size parameter is used to adjust the equivalent popcnt instruction. 1247 * popcntb = 8, popcntw = 32, popcntd = 64 1248 */ 1249 static nokprobe_inline void do_popcnt(const struct pt_regs *regs, 1250 struct instruction_op *op, 1251 unsigned long v1, int size) 1252 { 1253 unsigned long long out = v1; 1254 1255 out -= (out >> 1) & 0x5555555555555555ULL; 1256 out = (0x3333333333333333ULL & out) + 1257 (0x3333333333333333ULL & (out >> 2)); 1258 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1259 1260 if (size == 8) { /* popcntb */ 1261 op->val = out; 1262 return; 1263 } 1264 out += out >> 8; 1265 out += out >> 16; 1266 if (size == 32) { /* popcntw */ 1267 op->val = out & 0x0000003f0000003fULL; 1268 return; 1269 } 1270 1271 out = (out + (out >> 32)) & 0x7f; 1272 op->val = out; /* popcntd */ 1273 } 1274 1275 #ifdef CONFIG_PPC64 1276 static nokprobe_inline void do_bpermd(const struct pt_regs *regs, 1277 struct instruction_op *op, 1278 unsigned long v1, unsigned long v2) 1279 { 1280 unsigned char perm, idx; 1281 unsigned int i; 1282 1283 perm = 0; 1284 for (i = 0; i < 8; i++) { 1285 idx = (v1 >> (i * 8)) & 0xff; 1286 if (idx < 64) 1287 if (v2 & PPC_BIT(idx)) 1288 perm |= 1 << i; 1289 } 1290 op->val = perm; 1291 } 1292 #endif /* CONFIG_PPC64 */ 1293 /* 1294 * The size parameter adjusts the equivalent prty instruction. 1295 * prtyw = 32, prtyd = 64 1296 */ 1297 static nokprobe_inline void do_prty(const struct pt_regs *regs, 1298 struct instruction_op *op, 1299 unsigned long v, int size) 1300 { 1301 unsigned long long res = v ^ (v >> 8); 1302 1303 res ^= res >> 16; 1304 if (size == 32) { /* prtyw */ 1305 op->val = res & 0x0000000100000001ULL; 1306 return; 1307 } 1308 1309 res ^= res >> 32; 1310 op->val = res & 1; /*prtyd */ 1311 } 1312 1313 static nokprobe_inline int trap_compare(long v1, long v2) 1314 { 1315 int ret = 0; 1316 1317 if (v1 < v2) 1318 ret |= 0x10; 1319 else if (v1 > v2) 1320 ret |= 0x08; 1321 else 1322 ret |= 0x04; 1323 if ((unsigned long)v1 < (unsigned long)v2) 1324 ret |= 0x02; 1325 else if ((unsigned long)v1 > (unsigned long)v2) 1326 ret |= 0x01; 1327 return ret; 1328 } 1329 1330 /* 1331 * Elements of 32-bit rotate and mask instructions. 1332 */ 1333 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \ 1334 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb))) 1335 #ifdef __powerpc64__ 1336 #define MASK64_L(mb) (~0UL >> (mb)) 1337 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me)) 1338 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb))) 1339 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32)) 1340 #else 1341 #define DATA32(x) (x) 1342 #endif 1343 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) 1344 1345 /* 1346 * Decode an instruction, and return information about it in *op 1347 * without changing *regs. 1348 * Integer arithmetic and logical instructions, branches, and barrier 1349 * instructions can be emulated just using the information in *op. 1350 * 1351 * Return value is 1 if the instruction can be emulated just by 1352 * updating *regs with the information in *op, -1 if we need the 1353 * GPRs but *regs doesn't contain the full register set, or 0 1354 * otherwise. 1355 */ 1356 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, 1357 ppc_inst_t instr) 1358 { 1359 #ifdef CONFIG_PPC64 1360 unsigned int suffixopcode, prefixtype, prefix_r; 1361 #endif 1362 unsigned int opcode, ra, rb, rc, rd, spr, u; 1363 unsigned long int imm; 1364 unsigned long int val, val2; 1365 unsigned int mb, me, sh; 1366 unsigned int word, suffix; 1367 long ival; 1368 1369 word = ppc_inst_val(instr); 1370 suffix = ppc_inst_suffix(instr); 1371 1372 op->type = COMPUTE; 1373 1374 opcode = ppc_inst_primary_opcode(instr); 1375 switch (opcode) { 1376 case 16: /* bc */ 1377 op->type = BRANCH; 1378 imm = (signed short)(word & 0xfffc); 1379 if ((word & 2) == 0) 1380 imm += regs->nip; 1381 op->val = truncate_if_32bit(regs->msr, imm); 1382 if (word & 1) 1383 op->type |= SETLK; 1384 if (branch_taken(word, regs, op)) 1385 op->type |= BRTAKEN; 1386 return 1; 1387 #ifdef CONFIG_PPC64 1388 case 17: /* sc */ 1389 if ((word & 0xfe2) == 2) 1390 op->type = SYSCALL; 1391 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && 1392 (word & 0xfe3) == 1) { /* scv */ 1393 op->type = SYSCALL_VECTORED_0; 1394 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1395 goto unknown_opcode; 1396 } else 1397 op->type = UNKNOWN; 1398 return 0; 1399 #endif 1400 case 18: /* b */ 1401 op->type = BRANCH | BRTAKEN; 1402 imm = word & 0x03fffffc; 1403 if (imm & 0x02000000) 1404 imm -= 0x04000000; 1405 if ((word & 2) == 0) 1406 imm += regs->nip; 1407 op->val = truncate_if_32bit(regs->msr, imm); 1408 if (word & 1) 1409 op->type |= SETLK; 1410 return 1; 1411 case 19: 1412 switch ((word >> 1) & 0x3ff) { 1413 case 0: /* mcrf */ 1414 op->type = COMPUTE + SETCC; 1415 rd = 7 - ((word >> 23) & 0x7); 1416 ra = 7 - ((word >> 18) & 0x7); 1417 rd *= 4; 1418 ra *= 4; 1419 val = (regs->ccr >> ra) & 0xf; 1420 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd); 1421 return 1; 1422 1423 case 16: /* bclr */ 1424 case 528: /* bcctr */ 1425 op->type = BRANCH; 1426 imm = (word & 0x400)? regs->ctr: regs->link; 1427 op->val = truncate_if_32bit(regs->msr, imm); 1428 if (word & 1) 1429 op->type |= SETLK; 1430 if (branch_taken(word, regs, op)) 1431 op->type |= BRTAKEN; 1432 return 1; 1433 1434 case 18: /* rfid, scary */ 1435 if (regs->msr & MSR_PR) 1436 goto priv; 1437 op->type = RFI; 1438 return 0; 1439 1440 case 150: /* isync */ 1441 op->type = BARRIER | BARRIER_ISYNC; 1442 return 1; 1443 1444 case 33: /* crnor */ 1445 case 129: /* crandc */ 1446 case 193: /* crxor */ 1447 case 225: /* crnand */ 1448 case 257: /* crand */ 1449 case 289: /* creqv */ 1450 case 417: /* crorc */ 1451 case 449: /* cror */ 1452 op->type = COMPUTE + SETCC; 1453 ra = (word >> 16) & 0x1f; 1454 rb = (word >> 11) & 0x1f; 1455 rd = (word >> 21) & 0x1f; 1456 ra = (regs->ccr >> (31 - ra)) & 1; 1457 rb = (regs->ccr >> (31 - rb)) & 1; 1458 val = (word >> (6 + ra * 2 + rb)) & 1; 1459 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | 1460 (val << (31 - rd)); 1461 return 1; 1462 } 1463 break; 1464 case 31: 1465 switch ((word >> 1) & 0x3ff) { 1466 case 598: /* sync */ 1467 op->type = BARRIER + BARRIER_SYNC; 1468 #ifdef __powerpc64__ 1469 switch ((word >> 21) & 3) { 1470 case 1: /* lwsync */ 1471 op->type = BARRIER + BARRIER_LWSYNC; 1472 break; 1473 case 2: /* ptesync */ 1474 op->type = BARRIER + BARRIER_PTESYNC; 1475 break; 1476 } 1477 #endif 1478 return 1; 1479 1480 case 854: /* eieio */ 1481 op->type = BARRIER + BARRIER_EIEIO; 1482 return 1; 1483 } 1484 break; 1485 } 1486 1487 rd = (word >> 21) & 0x1f; 1488 ra = (word >> 16) & 0x1f; 1489 rb = (word >> 11) & 0x1f; 1490 rc = (word >> 6) & 0x1f; 1491 1492 switch (opcode) { 1493 #ifdef __powerpc64__ 1494 case 1: 1495 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 1496 goto unknown_opcode; 1497 1498 prefix_r = GET_PREFIX_R(word); 1499 ra = GET_PREFIX_RA(suffix); 1500 rd = (suffix >> 21) & 0x1f; 1501 op->reg = rd; 1502 op->val = regs->gpr[rd]; 1503 suffixopcode = get_op(suffix); 1504 prefixtype = (word >> 24) & 0x3; 1505 switch (prefixtype) { 1506 case 2: 1507 if (prefix_r && ra) 1508 return 0; 1509 switch (suffixopcode) { 1510 case 14: /* paddi */ 1511 op->type = COMPUTE | PREFIXED; 1512 op->val = mlsd_8lsd_ea(word, suffix, regs); 1513 goto compute_done; 1514 } 1515 } 1516 break; 1517 case 2: /* tdi */ 1518 if (rd & trap_compare(regs->gpr[ra], (short) word)) 1519 goto trap; 1520 return 1; 1521 #endif 1522 case 3: /* twi */ 1523 if (rd & trap_compare((int)regs->gpr[ra], (short) word)) 1524 goto trap; 1525 return 1; 1526 1527 #ifdef __powerpc64__ 1528 case 4: 1529 /* 1530 * There are very many instructions with this primary opcode 1531 * introduced in the ISA as early as v2.03. However, the ones 1532 * we currently emulate were all introduced with ISA 3.0 1533 */ 1534 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1535 goto unknown_opcode; 1536 1537 switch (word & 0x3f) { 1538 case 48: /* maddhd */ 1539 asm volatile(PPC_MADDHD(%0, %1, %2, %3) : 1540 "=r" (op->val) : "r" (regs->gpr[ra]), 1541 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1542 goto compute_done; 1543 1544 case 49: /* maddhdu */ 1545 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) : 1546 "=r" (op->val) : "r" (regs->gpr[ra]), 1547 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1548 goto compute_done; 1549 1550 case 51: /* maddld */ 1551 asm volatile(PPC_MADDLD(%0, %1, %2, %3) : 1552 "=r" (op->val) : "r" (regs->gpr[ra]), 1553 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1554 goto compute_done; 1555 } 1556 1557 /* 1558 * There are other instructions from ISA 3.0 with the same 1559 * primary opcode which do not have emulation support yet. 1560 */ 1561 goto unknown_opcode; 1562 #endif 1563 1564 case 7: /* mulli */ 1565 op->val = regs->gpr[ra] * (short) word; 1566 goto compute_done; 1567 1568 case 8: /* subfic */ 1569 imm = (short) word; 1570 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); 1571 return 1; 1572 1573 case 10: /* cmpli */ 1574 imm = (unsigned short) word; 1575 val = regs->gpr[ra]; 1576 #ifdef __powerpc64__ 1577 if ((rd & 1) == 0) 1578 val = (unsigned int) val; 1579 #endif 1580 do_cmp_unsigned(regs, op, val, imm, rd >> 2); 1581 return 1; 1582 1583 case 11: /* cmpi */ 1584 imm = (short) word; 1585 val = regs->gpr[ra]; 1586 #ifdef __powerpc64__ 1587 if ((rd & 1) == 0) 1588 val = (int) val; 1589 #endif 1590 do_cmp_signed(regs, op, val, imm, rd >> 2); 1591 return 1; 1592 1593 case 12: /* addic */ 1594 imm = (short) word; 1595 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1596 return 1; 1597 1598 case 13: /* addic. */ 1599 imm = (short) word; 1600 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1601 set_cr0(regs, op); 1602 return 1; 1603 1604 case 14: /* addi */ 1605 imm = (short) word; 1606 if (ra) 1607 imm += regs->gpr[ra]; 1608 op->val = imm; 1609 goto compute_done; 1610 1611 case 15: /* addis */ 1612 imm = ((short) word) << 16; 1613 if (ra) 1614 imm += regs->gpr[ra]; 1615 op->val = imm; 1616 goto compute_done; 1617 1618 case 19: 1619 if (((word >> 1) & 0x1f) == 2) { 1620 /* addpcis */ 1621 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1622 goto unknown_opcode; 1623 imm = (short) (word & 0xffc1); /* d0 + d2 fields */ 1624 imm |= (word >> 15) & 0x3e; /* d1 field */ 1625 op->val = regs->nip + (imm << 16) + 4; 1626 goto compute_done; 1627 } 1628 op->type = UNKNOWN; 1629 return 0; 1630 1631 case 20: /* rlwimi */ 1632 mb = (word >> 6) & 0x1f; 1633 me = (word >> 1) & 0x1f; 1634 val = DATA32(regs->gpr[rd]); 1635 imm = MASK32(mb, me); 1636 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); 1637 goto logical_done; 1638 1639 case 21: /* rlwinm */ 1640 mb = (word >> 6) & 0x1f; 1641 me = (word >> 1) & 0x1f; 1642 val = DATA32(regs->gpr[rd]); 1643 op->val = ROTATE(val, rb) & MASK32(mb, me); 1644 goto logical_done; 1645 1646 case 23: /* rlwnm */ 1647 mb = (word >> 6) & 0x1f; 1648 me = (word >> 1) & 0x1f; 1649 rb = regs->gpr[rb] & 0x1f; 1650 val = DATA32(regs->gpr[rd]); 1651 op->val = ROTATE(val, rb) & MASK32(mb, me); 1652 goto logical_done; 1653 1654 case 24: /* ori */ 1655 op->val = regs->gpr[rd] | (unsigned short) word; 1656 goto logical_done_nocc; 1657 1658 case 25: /* oris */ 1659 imm = (unsigned short) word; 1660 op->val = regs->gpr[rd] | (imm << 16); 1661 goto logical_done_nocc; 1662 1663 case 26: /* xori */ 1664 op->val = regs->gpr[rd] ^ (unsigned short) word; 1665 goto logical_done_nocc; 1666 1667 case 27: /* xoris */ 1668 imm = (unsigned short) word; 1669 op->val = regs->gpr[rd] ^ (imm << 16); 1670 goto logical_done_nocc; 1671 1672 case 28: /* andi. */ 1673 op->val = regs->gpr[rd] & (unsigned short) word; 1674 set_cr0(regs, op); 1675 goto logical_done_nocc; 1676 1677 case 29: /* andis. */ 1678 imm = (unsigned short) word; 1679 op->val = regs->gpr[rd] & (imm << 16); 1680 set_cr0(regs, op); 1681 goto logical_done_nocc; 1682 1683 #ifdef __powerpc64__ 1684 case 30: /* rld* */ 1685 mb = ((word >> 6) & 0x1f) | (word & 0x20); 1686 val = regs->gpr[rd]; 1687 if ((word & 0x10) == 0) { 1688 sh = rb | ((word & 2) << 4); 1689 val = ROTATE(val, sh); 1690 switch ((word >> 2) & 3) { 1691 case 0: /* rldicl */ 1692 val &= MASK64_L(mb); 1693 break; 1694 case 1: /* rldicr */ 1695 val &= MASK64_R(mb); 1696 break; 1697 case 2: /* rldic */ 1698 val &= MASK64(mb, 63 - sh); 1699 break; 1700 case 3: /* rldimi */ 1701 imm = MASK64(mb, 63 - sh); 1702 val = (regs->gpr[ra] & ~imm) | 1703 (val & imm); 1704 } 1705 op->val = val; 1706 goto logical_done; 1707 } else { 1708 sh = regs->gpr[rb] & 0x3f; 1709 val = ROTATE(val, sh); 1710 switch ((word >> 1) & 7) { 1711 case 0: /* rldcl */ 1712 op->val = val & MASK64_L(mb); 1713 goto logical_done; 1714 case 1: /* rldcr */ 1715 op->val = val & MASK64_R(mb); 1716 goto logical_done; 1717 } 1718 } 1719 #endif 1720 op->type = UNKNOWN; /* illegal instruction */ 1721 return 0; 1722 1723 case 31: 1724 /* isel occupies 32 minor opcodes */ 1725 if (((word >> 1) & 0x1f) == 15) { 1726 mb = (word >> 6) & 0x1f; /* bc field */ 1727 val = (regs->ccr >> (31 - mb)) & 1; 1728 val2 = (ra) ? regs->gpr[ra] : 0; 1729 1730 op->val = (val) ? val2 : regs->gpr[rb]; 1731 goto compute_done; 1732 } 1733 1734 switch ((word >> 1) & 0x3ff) { 1735 case 4: /* tw */ 1736 if (rd == 0x1f || 1737 (rd & trap_compare((int)regs->gpr[ra], 1738 (int)regs->gpr[rb]))) 1739 goto trap; 1740 return 1; 1741 #ifdef __powerpc64__ 1742 case 68: /* td */ 1743 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) 1744 goto trap; 1745 return 1; 1746 #endif 1747 case 83: /* mfmsr */ 1748 if (regs->msr & MSR_PR) 1749 goto priv; 1750 op->type = MFMSR; 1751 op->reg = rd; 1752 return 0; 1753 case 146: /* mtmsr */ 1754 if (regs->msr & MSR_PR) 1755 goto priv; 1756 op->type = MTMSR; 1757 op->reg = rd; 1758 op->val = 0xffffffff & ~(MSR_ME | MSR_LE); 1759 return 0; 1760 #ifdef CONFIG_PPC64 1761 case 178: /* mtmsrd */ 1762 if (regs->msr & MSR_PR) 1763 goto priv; 1764 op->type = MTMSR; 1765 op->reg = rd; 1766 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 1767 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ 1768 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL; 1769 op->val = imm; 1770 return 0; 1771 #endif 1772 1773 case 19: /* mfcr */ 1774 imm = 0xffffffffUL; 1775 if ((word >> 20) & 1) { 1776 imm = 0xf0000000UL; 1777 for (sh = 0; sh < 8; ++sh) { 1778 if (word & (0x80000 >> sh)) 1779 break; 1780 imm >>= 4; 1781 } 1782 } 1783 op->val = regs->ccr & imm; 1784 goto compute_done; 1785 1786 case 128: /* setb */ 1787 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1788 goto unknown_opcode; 1789 /* 1790 * 'ra' encodes the CR field number (bfa) in the top 3 bits. 1791 * Since each CR field is 4 bits, 1792 * we can simply mask off the bottom two bits (bfa * 4) 1793 * to yield the first bit in the CR field. 1794 */ 1795 ra = ra & ~0x3; 1796 /* 'val' stores bits of the CR field (bfa) */ 1797 val = regs->ccr >> (CR0_SHIFT - ra); 1798 /* checks if the LT bit of CR field (bfa) is set */ 1799 if (val & 8) 1800 op->val = -1; 1801 /* checks if the GT bit of CR field (bfa) is set */ 1802 else if (val & 4) 1803 op->val = 1; 1804 else 1805 op->val = 0; 1806 goto compute_done; 1807 1808 case 144: /* mtcrf */ 1809 op->type = COMPUTE + SETCC; 1810 imm = 0xf0000000UL; 1811 val = regs->gpr[rd]; 1812 op->ccval = regs->ccr; 1813 for (sh = 0; sh < 8; ++sh) { 1814 if (word & (0x80000 >> sh)) 1815 op->ccval = (op->ccval & ~imm) | 1816 (val & imm); 1817 imm >>= 4; 1818 } 1819 return 1; 1820 1821 case 339: /* mfspr */ 1822 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1823 op->type = MFSPR; 1824 op->reg = rd; 1825 op->spr = spr; 1826 if (spr == SPRN_XER || spr == SPRN_LR || 1827 spr == SPRN_CTR) 1828 return 1; 1829 return 0; 1830 1831 case 467: /* mtspr */ 1832 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1833 op->type = MTSPR; 1834 op->val = regs->gpr[rd]; 1835 op->spr = spr; 1836 if (spr == SPRN_XER || spr == SPRN_LR || 1837 spr == SPRN_CTR) 1838 return 1; 1839 return 0; 1840 1841 /* 1842 * Compare instructions 1843 */ 1844 case 0: /* cmp */ 1845 val = regs->gpr[ra]; 1846 val2 = regs->gpr[rb]; 1847 #ifdef __powerpc64__ 1848 if ((rd & 1) == 0) { 1849 /* word (32-bit) compare */ 1850 val = (int) val; 1851 val2 = (int) val2; 1852 } 1853 #endif 1854 do_cmp_signed(regs, op, val, val2, rd >> 2); 1855 return 1; 1856 1857 case 32: /* cmpl */ 1858 val = regs->gpr[ra]; 1859 val2 = regs->gpr[rb]; 1860 #ifdef __powerpc64__ 1861 if ((rd & 1) == 0) { 1862 /* word (32-bit) compare */ 1863 val = (unsigned int) val; 1864 val2 = (unsigned int) val2; 1865 } 1866 #endif 1867 do_cmp_unsigned(regs, op, val, val2, rd >> 2); 1868 return 1; 1869 1870 case 508: /* cmpb */ 1871 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); 1872 goto logical_done_nocc; 1873 1874 /* 1875 * Arithmetic instructions 1876 */ 1877 case 8: /* subfc */ 1878 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1879 regs->gpr[rb], 1); 1880 goto arith_done; 1881 #ifdef __powerpc64__ 1882 case 9: /* mulhdu */ 1883 asm("mulhdu %0,%1,%2" : "=r" (op->val) : 1884 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1885 goto arith_done; 1886 #endif 1887 case 10: /* addc */ 1888 add_with_carry(regs, op, rd, regs->gpr[ra], 1889 regs->gpr[rb], 0); 1890 goto arith_done; 1891 1892 case 11: /* mulhwu */ 1893 asm("mulhwu %0,%1,%2" : "=r" (op->val) : 1894 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1895 goto arith_done; 1896 1897 case 40: /* subf */ 1898 op->val = regs->gpr[rb] - regs->gpr[ra]; 1899 goto arith_done; 1900 #ifdef __powerpc64__ 1901 case 73: /* mulhd */ 1902 asm("mulhd %0,%1,%2" : "=r" (op->val) : 1903 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1904 goto arith_done; 1905 #endif 1906 case 75: /* mulhw */ 1907 asm("mulhw %0,%1,%2" : "=r" (op->val) : 1908 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1909 goto arith_done; 1910 1911 case 104: /* neg */ 1912 op->val = -regs->gpr[ra]; 1913 goto arith_done; 1914 1915 case 136: /* subfe */ 1916 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1917 regs->gpr[rb], regs->xer & XER_CA); 1918 goto arith_done; 1919 1920 case 138: /* adde */ 1921 add_with_carry(regs, op, rd, regs->gpr[ra], 1922 regs->gpr[rb], regs->xer & XER_CA); 1923 goto arith_done; 1924 1925 case 200: /* subfze */ 1926 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, 1927 regs->xer & XER_CA); 1928 goto arith_done; 1929 1930 case 202: /* addze */ 1931 add_with_carry(regs, op, rd, regs->gpr[ra], 0L, 1932 regs->xer & XER_CA); 1933 goto arith_done; 1934 1935 case 232: /* subfme */ 1936 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L, 1937 regs->xer & XER_CA); 1938 goto arith_done; 1939 #ifdef __powerpc64__ 1940 case 233: /* mulld */ 1941 op->val = regs->gpr[ra] * regs->gpr[rb]; 1942 goto arith_done; 1943 #endif 1944 case 234: /* addme */ 1945 add_with_carry(regs, op, rd, regs->gpr[ra], -1L, 1946 regs->xer & XER_CA); 1947 goto arith_done; 1948 1949 case 235: /* mullw */ 1950 op->val = (long)(int) regs->gpr[ra] * 1951 (int) regs->gpr[rb]; 1952 1953 goto arith_done; 1954 #ifdef __powerpc64__ 1955 case 265: /* modud */ 1956 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1957 goto unknown_opcode; 1958 op->val = regs->gpr[ra] % regs->gpr[rb]; 1959 goto compute_done; 1960 #endif 1961 case 266: /* add */ 1962 op->val = regs->gpr[ra] + regs->gpr[rb]; 1963 goto arith_done; 1964 1965 case 267: /* moduw */ 1966 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1967 goto unknown_opcode; 1968 op->val = (unsigned int) regs->gpr[ra] % 1969 (unsigned int) regs->gpr[rb]; 1970 goto compute_done; 1971 #ifdef __powerpc64__ 1972 case 457: /* divdu */ 1973 op->val = regs->gpr[ra] / regs->gpr[rb]; 1974 goto arith_done; 1975 #endif 1976 case 459: /* divwu */ 1977 op->val = (unsigned int) regs->gpr[ra] / 1978 (unsigned int) regs->gpr[rb]; 1979 goto arith_done; 1980 #ifdef __powerpc64__ 1981 case 489: /* divd */ 1982 op->val = (long int) regs->gpr[ra] / 1983 (long int) regs->gpr[rb]; 1984 goto arith_done; 1985 #endif 1986 case 491: /* divw */ 1987 op->val = (int) regs->gpr[ra] / 1988 (int) regs->gpr[rb]; 1989 goto arith_done; 1990 #ifdef __powerpc64__ 1991 case 425: /* divde[.] */ 1992 asm volatile(PPC_DIVDE(%0, %1, %2) : 1993 "=r" (op->val) : "r" (regs->gpr[ra]), 1994 "r" (regs->gpr[rb])); 1995 goto arith_done; 1996 case 393: /* divdeu[.] */ 1997 asm volatile(PPC_DIVDEU(%0, %1, %2) : 1998 "=r" (op->val) : "r" (regs->gpr[ra]), 1999 "r" (regs->gpr[rb])); 2000 goto arith_done; 2001 #endif 2002 case 755: /* darn */ 2003 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2004 goto unknown_opcode; 2005 switch (ra & 0x3) { 2006 case 0: 2007 /* 32-bit conditioned */ 2008 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val)); 2009 goto compute_done; 2010 2011 case 1: 2012 /* 64-bit conditioned */ 2013 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val)); 2014 goto compute_done; 2015 2016 case 2: 2017 /* 64-bit raw */ 2018 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val)); 2019 goto compute_done; 2020 } 2021 2022 goto unknown_opcode; 2023 #ifdef __powerpc64__ 2024 case 777: /* modsd */ 2025 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2026 goto unknown_opcode; 2027 op->val = (long int) regs->gpr[ra] % 2028 (long int) regs->gpr[rb]; 2029 goto compute_done; 2030 #endif 2031 case 779: /* modsw */ 2032 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2033 goto unknown_opcode; 2034 op->val = (int) regs->gpr[ra] % 2035 (int) regs->gpr[rb]; 2036 goto compute_done; 2037 2038 2039 /* 2040 * Logical instructions 2041 */ 2042 case 26: /* cntlzw */ 2043 val = (unsigned int) regs->gpr[rd]; 2044 op->val = ( val ? __builtin_clz(val) : 32 ); 2045 goto logical_done; 2046 #ifdef __powerpc64__ 2047 case 58: /* cntlzd */ 2048 val = regs->gpr[rd]; 2049 op->val = ( val ? __builtin_clzl(val) : 64 ); 2050 goto logical_done; 2051 #endif 2052 case 28: /* and */ 2053 op->val = regs->gpr[rd] & regs->gpr[rb]; 2054 goto logical_done; 2055 2056 case 60: /* andc */ 2057 op->val = regs->gpr[rd] & ~regs->gpr[rb]; 2058 goto logical_done; 2059 2060 case 122: /* popcntb */ 2061 do_popcnt(regs, op, regs->gpr[rd], 8); 2062 goto logical_done_nocc; 2063 2064 case 124: /* nor */ 2065 op->val = ~(regs->gpr[rd] | regs->gpr[rb]); 2066 goto logical_done; 2067 2068 case 154: /* prtyw */ 2069 do_prty(regs, op, regs->gpr[rd], 32); 2070 goto logical_done_nocc; 2071 2072 case 186: /* prtyd */ 2073 do_prty(regs, op, regs->gpr[rd], 64); 2074 goto logical_done_nocc; 2075 #ifdef CONFIG_PPC64 2076 case 252: /* bpermd */ 2077 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]); 2078 goto logical_done_nocc; 2079 #endif 2080 case 284: /* xor */ 2081 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]); 2082 goto logical_done; 2083 2084 case 316: /* xor */ 2085 op->val = regs->gpr[rd] ^ regs->gpr[rb]; 2086 goto logical_done; 2087 2088 case 378: /* popcntw */ 2089 do_popcnt(regs, op, regs->gpr[rd], 32); 2090 goto logical_done_nocc; 2091 2092 case 412: /* orc */ 2093 op->val = regs->gpr[rd] | ~regs->gpr[rb]; 2094 goto logical_done; 2095 2096 case 444: /* or */ 2097 op->val = regs->gpr[rd] | regs->gpr[rb]; 2098 goto logical_done; 2099 2100 case 476: /* nand */ 2101 op->val = ~(regs->gpr[rd] & regs->gpr[rb]); 2102 goto logical_done; 2103 #ifdef CONFIG_PPC64 2104 case 506: /* popcntd */ 2105 do_popcnt(regs, op, regs->gpr[rd], 64); 2106 goto logical_done_nocc; 2107 #endif 2108 case 538: /* cnttzw */ 2109 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2110 goto unknown_opcode; 2111 val = (unsigned int) regs->gpr[rd]; 2112 op->val = (val ? __builtin_ctz(val) : 32); 2113 goto logical_done; 2114 #ifdef __powerpc64__ 2115 case 570: /* cnttzd */ 2116 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2117 goto unknown_opcode; 2118 val = regs->gpr[rd]; 2119 op->val = (val ? __builtin_ctzl(val) : 64); 2120 goto logical_done; 2121 #endif 2122 case 922: /* extsh */ 2123 op->val = (signed short) regs->gpr[rd]; 2124 goto logical_done; 2125 2126 case 954: /* extsb */ 2127 op->val = (signed char) regs->gpr[rd]; 2128 goto logical_done; 2129 #ifdef __powerpc64__ 2130 case 986: /* extsw */ 2131 op->val = (signed int) regs->gpr[rd]; 2132 goto logical_done; 2133 #endif 2134 2135 /* 2136 * Shift instructions 2137 */ 2138 case 24: /* slw */ 2139 sh = regs->gpr[rb] & 0x3f; 2140 if (sh < 32) 2141 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; 2142 else 2143 op->val = 0; 2144 goto logical_done; 2145 2146 case 536: /* srw */ 2147 sh = regs->gpr[rb] & 0x3f; 2148 if (sh < 32) 2149 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; 2150 else 2151 op->val = 0; 2152 goto logical_done; 2153 2154 case 792: /* sraw */ 2155 op->type = COMPUTE + SETREG + SETXER; 2156 sh = regs->gpr[rb] & 0x3f; 2157 ival = (signed int) regs->gpr[rd]; 2158 op->val = ival >> (sh < 32 ? sh : 31); 2159 op->xerval = regs->xer; 2160 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) 2161 op->xerval |= XER_CA; 2162 else 2163 op->xerval &= ~XER_CA; 2164 set_ca32(op, op->xerval & XER_CA); 2165 goto logical_done; 2166 2167 case 824: /* srawi */ 2168 op->type = COMPUTE + SETREG + SETXER; 2169 sh = rb; 2170 ival = (signed int) regs->gpr[rd]; 2171 op->val = ival >> sh; 2172 op->xerval = regs->xer; 2173 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2174 op->xerval |= XER_CA; 2175 else 2176 op->xerval &= ~XER_CA; 2177 set_ca32(op, op->xerval & XER_CA); 2178 goto logical_done; 2179 2180 #ifdef __powerpc64__ 2181 case 27: /* sld */ 2182 sh = regs->gpr[rb] & 0x7f; 2183 if (sh < 64) 2184 op->val = regs->gpr[rd] << sh; 2185 else 2186 op->val = 0; 2187 goto logical_done; 2188 2189 case 539: /* srd */ 2190 sh = regs->gpr[rb] & 0x7f; 2191 if (sh < 64) 2192 op->val = regs->gpr[rd] >> sh; 2193 else 2194 op->val = 0; 2195 goto logical_done; 2196 2197 case 794: /* srad */ 2198 op->type = COMPUTE + SETREG + SETXER; 2199 sh = regs->gpr[rb] & 0x7f; 2200 ival = (signed long int) regs->gpr[rd]; 2201 op->val = ival >> (sh < 64 ? sh : 63); 2202 op->xerval = regs->xer; 2203 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) 2204 op->xerval |= XER_CA; 2205 else 2206 op->xerval &= ~XER_CA; 2207 set_ca32(op, op->xerval & XER_CA); 2208 goto logical_done; 2209 2210 case 826: /* sradi with sh_5 = 0 */ 2211 case 827: /* sradi with sh_5 = 1 */ 2212 op->type = COMPUTE + SETREG + SETXER; 2213 sh = rb | ((word & 2) << 4); 2214 ival = (signed long int) regs->gpr[rd]; 2215 op->val = ival >> sh; 2216 op->xerval = regs->xer; 2217 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2218 op->xerval |= XER_CA; 2219 else 2220 op->xerval &= ~XER_CA; 2221 set_ca32(op, op->xerval & XER_CA); 2222 goto logical_done; 2223 2224 case 890: /* extswsli with sh_5 = 0 */ 2225 case 891: /* extswsli with sh_5 = 1 */ 2226 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2227 goto unknown_opcode; 2228 op->type = COMPUTE + SETREG; 2229 sh = rb | ((word & 2) << 4); 2230 val = (signed int) regs->gpr[rd]; 2231 if (sh) 2232 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); 2233 else 2234 op->val = val; 2235 goto logical_done; 2236 2237 #endif /* __powerpc64__ */ 2238 2239 /* 2240 * Cache instructions 2241 */ 2242 case 54: /* dcbst */ 2243 op->type = MKOP(CACHEOP, DCBST, 0); 2244 op->ea = xform_ea(word, regs); 2245 return 0; 2246 2247 case 86: /* dcbf */ 2248 op->type = MKOP(CACHEOP, DCBF, 0); 2249 op->ea = xform_ea(word, regs); 2250 return 0; 2251 2252 case 246: /* dcbtst */ 2253 op->type = MKOP(CACHEOP, DCBTST, 0); 2254 op->ea = xform_ea(word, regs); 2255 op->reg = rd; 2256 return 0; 2257 2258 case 278: /* dcbt */ 2259 op->type = MKOP(CACHEOP, DCBTST, 0); 2260 op->ea = xform_ea(word, regs); 2261 op->reg = rd; 2262 return 0; 2263 2264 case 982: /* icbi */ 2265 op->type = MKOP(CACHEOP, ICBI, 0); 2266 op->ea = xform_ea(word, regs); 2267 return 0; 2268 2269 case 1014: /* dcbz */ 2270 op->type = MKOP(CACHEOP, DCBZ, 0); 2271 op->ea = xform_ea(word, regs); 2272 return 0; 2273 } 2274 break; 2275 } 2276 2277 /* 2278 * Loads and stores. 2279 */ 2280 op->type = UNKNOWN; 2281 op->update_reg = ra; 2282 op->reg = rd; 2283 op->val = regs->gpr[rd]; 2284 u = (word >> 20) & UPDATE; 2285 op->vsx_flags = 0; 2286 2287 switch (opcode) { 2288 case 31: 2289 u = word & UPDATE; 2290 op->ea = xform_ea(word, regs); 2291 switch ((word >> 1) & 0x3ff) { 2292 case 20: /* lwarx */ 2293 op->type = MKOP(LARX, 0, 4); 2294 break; 2295 2296 case 150: /* stwcx. */ 2297 op->type = MKOP(STCX, 0, 4); 2298 break; 2299 2300 #ifdef __powerpc64__ 2301 case 84: /* ldarx */ 2302 op->type = MKOP(LARX, 0, 8); 2303 break; 2304 2305 case 214: /* stdcx. */ 2306 op->type = MKOP(STCX, 0, 8); 2307 break; 2308 2309 case 52: /* lbarx */ 2310 op->type = MKOP(LARX, 0, 1); 2311 break; 2312 2313 case 694: /* stbcx. */ 2314 op->type = MKOP(STCX, 0, 1); 2315 break; 2316 2317 case 116: /* lharx */ 2318 op->type = MKOP(LARX, 0, 2); 2319 break; 2320 2321 case 726: /* sthcx. */ 2322 op->type = MKOP(STCX, 0, 2); 2323 break; 2324 2325 case 276: /* lqarx */ 2326 if (!((rd & 1) || rd == ra || rd == rb)) 2327 op->type = MKOP(LARX, 0, 16); 2328 break; 2329 2330 case 182: /* stqcx. */ 2331 if (!(rd & 1)) 2332 op->type = MKOP(STCX, 0, 16); 2333 break; 2334 #endif 2335 2336 case 23: /* lwzx */ 2337 case 55: /* lwzux */ 2338 op->type = MKOP(LOAD, u, 4); 2339 break; 2340 2341 case 87: /* lbzx */ 2342 case 119: /* lbzux */ 2343 op->type = MKOP(LOAD, u, 1); 2344 break; 2345 2346 #ifdef CONFIG_ALTIVEC 2347 /* 2348 * Note: for the load/store vector element instructions, 2349 * bits of the EA say which field of the VMX register to use. 2350 */ 2351 case 7: /* lvebx */ 2352 op->type = MKOP(LOAD_VMX, 0, 1); 2353 op->element_size = 1; 2354 break; 2355 2356 case 39: /* lvehx */ 2357 op->type = MKOP(LOAD_VMX, 0, 2); 2358 op->element_size = 2; 2359 break; 2360 2361 case 71: /* lvewx */ 2362 op->type = MKOP(LOAD_VMX, 0, 4); 2363 op->element_size = 4; 2364 break; 2365 2366 case 103: /* lvx */ 2367 case 359: /* lvxl */ 2368 op->type = MKOP(LOAD_VMX, 0, 16); 2369 op->element_size = 16; 2370 break; 2371 2372 case 135: /* stvebx */ 2373 op->type = MKOP(STORE_VMX, 0, 1); 2374 op->element_size = 1; 2375 break; 2376 2377 case 167: /* stvehx */ 2378 op->type = MKOP(STORE_VMX, 0, 2); 2379 op->element_size = 2; 2380 break; 2381 2382 case 199: /* stvewx */ 2383 op->type = MKOP(STORE_VMX, 0, 4); 2384 op->element_size = 4; 2385 break; 2386 2387 case 231: /* stvx */ 2388 case 487: /* stvxl */ 2389 op->type = MKOP(STORE_VMX, 0, 16); 2390 break; 2391 #endif /* CONFIG_ALTIVEC */ 2392 2393 #ifdef __powerpc64__ 2394 case 21: /* ldx */ 2395 case 53: /* ldux */ 2396 op->type = MKOP(LOAD, u, 8); 2397 break; 2398 2399 case 149: /* stdx */ 2400 case 181: /* stdux */ 2401 op->type = MKOP(STORE, u, 8); 2402 break; 2403 #endif 2404 2405 case 151: /* stwx */ 2406 case 183: /* stwux */ 2407 op->type = MKOP(STORE, u, 4); 2408 break; 2409 2410 case 215: /* stbx */ 2411 case 247: /* stbux */ 2412 op->type = MKOP(STORE, u, 1); 2413 break; 2414 2415 case 279: /* lhzx */ 2416 case 311: /* lhzux */ 2417 op->type = MKOP(LOAD, u, 2); 2418 break; 2419 2420 #ifdef __powerpc64__ 2421 case 341: /* lwax */ 2422 case 373: /* lwaux */ 2423 op->type = MKOP(LOAD, SIGNEXT | u, 4); 2424 break; 2425 #endif 2426 2427 case 343: /* lhax */ 2428 case 375: /* lhaux */ 2429 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2430 break; 2431 2432 case 407: /* sthx */ 2433 case 439: /* sthux */ 2434 op->type = MKOP(STORE, u, 2); 2435 break; 2436 2437 #ifdef __powerpc64__ 2438 case 532: /* ldbrx */ 2439 op->type = MKOP(LOAD, BYTEREV, 8); 2440 break; 2441 2442 #endif 2443 case 533: /* lswx */ 2444 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f); 2445 break; 2446 2447 case 534: /* lwbrx */ 2448 op->type = MKOP(LOAD, BYTEREV, 4); 2449 break; 2450 2451 case 597: /* lswi */ 2452 if (rb == 0) 2453 rb = 32; /* # bytes to load */ 2454 op->type = MKOP(LOAD_MULTI, 0, rb); 2455 op->ea = ra ? regs->gpr[ra] : 0; 2456 break; 2457 2458 #ifdef CONFIG_PPC_FPU 2459 case 535: /* lfsx */ 2460 case 567: /* lfsux */ 2461 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2462 break; 2463 2464 case 599: /* lfdx */ 2465 case 631: /* lfdux */ 2466 op->type = MKOP(LOAD_FP, u, 8); 2467 break; 2468 2469 case 663: /* stfsx */ 2470 case 695: /* stfsux */ 2471 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2472 break; 2473 2474 case 727: /* stfdx */ 2475 case 759: /* stfdux */ 2476 op->type = MKOP(STORE_FP, u, 8); 2477 break; 2478 2479 #ifdef __powerpc64__ 2480 case 791: /* lfdpx */ 2481 op->type = MKOP(LOAD_FP, 0, 16); 2482 break; 2483 2484 case 855: /* lfiwax */ 2485 op->type = MKOP(LOAD_FP, SIGNEXT, 4); 2486 break; 2487 2488 case 887: /* lfiwzx */ 2489 op->type = MKOP(LOAD_FP, 0, 4); 2490 break; 2491 2492 case 919: /* stfdpx */ 2493 op->type = MKOP(STORE_FP, 0, 16); 2494 break; 2495 2496 case 983: /* stfiwx */ 2497 op->type = MKOP(STORE_FP, 0, 4); 2498 break; 2499 #endif /* __powerpc64 */ 2500 #endif /* CONFIG_PPC_FPU */ 2501 2502 #ifdef __powerpc64__ 2503 case 660: /* stdbrx */ 2504 op->type = MKOP(STORE, BYTEREV, 8); 2505 op->val = byterev_8(regs->gpr[rd]); 2506 break; 2507 2508 #endif 2509 case 661: /* stswx */ 2510 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); 2511 break; 2512 2513 case 662: /* stwbrx */ 2514 op->type = MKOP(STORE, BYTEREV, 4); 2515 op->val = byterev_4(regs->gpr[rd]); 2516 break; 2517 2518 case 725: /* stswi */ 2519 if (rb == 0) 2520 rb = 32; /* # bytes to store */ 2521 op->type = MKOP(STORE_MULTI, 0, rb); 2522 op->ea = ra ? regs->gpr[ra] : 0; 2523 break; 2524 2525 case 790: /* lhbrx */ 2526 op->type = MKOP(LOAD, BYTEREV, 2); 2527 break; 2528 2529 case 918: /* sthbrx */ 2530 op->type = MKOP(STORE, BYTEREV, 2); 2531 op->val = byterev_2(regs->gpr[rd]); 2532 break; 2533 2534 #ifdef CONFIG_VSX 2535 case 12: /* lxsiwzx */ 2536 op->reg = rd | ((word & 1) << 5); 2537 op->type = MKOP(LOAD_VSX, 0, 4); 2538 op->element_size = 8; 2539 break; 2540 2541 case 76: /* lxsiwax */ 2542 op->reg = rd | ((word & 1) << 5); 2543 op->type = MKOP(LOAD_VSX, SIGNEXT, 4); 2544 op->element_size = 8; 2545 break; 2546 2547 case 140: /* stxsiwx */ 2548 op->reg = rd | ((word & 1) << 5); 2549 op->type = MKOP(STORE_VSX, 0, 4); 2550 op->element_size = 8; 2551 break; 2552 2553 case 268: /* lxvx */ 2554 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2555 goto unknown_opcode; 2556 op->reg = rd | ((word & 1) << 5); 2557 op->type = MKOP(LOAD_VSX, 0, 16); 2558 op->element_size = 16; 2559 op->vsx_flags = VSX_CHECK_VEC; 2560 break; 2561 2562 case 269: /* lxvl */ 2563 case 301: { /* lxvll */ 2564 int nb; 2565 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2566 goto unknown_opcode; 2567 op->reg = rd | ((word & 1) << 5); 2568 op->ea = ra ? regs->gpr[ra] : 0; 2569 nb = regs->gpr[rb] & 0xff; 2570 if (nb > 16) 2571 nb = 16; 2572 op->type = MKOP(LOAD_VSX, 0, nb); 2573 op->element_size = 16; 2574 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2575 VSX_CHECK_VEC; 2576 break; 2577 } 2578 case 332: /* lxvdsx */ 2579 op->reg = rd | ((word & 1) << 5); 2580 op->type = MKOP(LOAD_VSX, 0, 8); 2581 op->element_size = 8; 2582 op->vsx_flags = VSX_SPLAT; 2583 break; 2584 2585 case 333: /* lxvpx */ 2586 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2587 goto unknown_opcode; 2588 op->reg = VSX_REGISTER_XTP(rd); 2589 op->type = MKOP(LOAD_VSX, 0, 32); 2590 op->element_size = 32; 2591 break; 2592 2593 case 364: /* lxvwsx */ 2594 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2595 goto unknown_opcode; 2596 op->reg = rd | ((word & 1) << 5); 2597 op->type = MKOP(LOAD_VSX, 0, 4); 2598 op->element_size = 4; 2599 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; 2600 break; 2601 2602 case 396: /* stxvx */ 2603 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2604 goto unknown_opcode; 2605 op->reg = rd | ((word & 1) << 5); 2606 op->type = MKOP(STORE_VSX, 0, 16); 2607 op->element_size = 16; 2608 op->vsx_flags = VSX_CHECK_VEC; 2609 break; 2610 2611 case 397: /* stxvl */ 2612 case 429: { /* stxvll */ 2613 int nb; 2614 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2615 goto unknown_opcode; 2616 op->reg = rd | ((word & 1) << 5); 2617 op->ea = ra ? regs->gpr[ra] : 0; 2618 nb = regs->gpr[rb] & 0xff; 2619 if (nb > 16) 2620 nb = 16; 2621 op->type = MKOP(STORE_VSX, 0, nb); 2622 op->element_size = 16; 2623 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2624 VSX_CHECK_VEC; 2625 break; 2626 } 2627 case 461: /* stxvpx */ 2628 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2629 goto unknown_opcode; 2630 op->reg = VSX_REGISTER_XTP(rd); 2631 op->type = MKOP(STORE_VSX, 0, 32); 2632 op->element_size = 32; 2633 break; 2634 case 524: /* lxsspx */ 2635 op->reg = rd | ((word & 1) << 5); 2636 op->type = MKOP(LOAD_VSX, 0, 4); 2637 op->element_size = 8; 2638 op->vsx_flags = VSX_FPCONV; 2639 break; 2640 2641 case 588: /* lxsdx */ 2642 op->reg = rd | ((word & 1) << 5); 2643 op->type = MKOP(LOAD_VSX, 0, 8); 2644 op->element_size = 8; 2645 break; 2646 2647 case 652: /* stxsspx */ 2648 op->reg = rd | ((word & 1) << 5); 2649 op->type = MKOP(STORE_VSX, 0, 4); 2650 op->element_size = 8; 2651 op->vsx_flags = VSX_FPCONV; 2652 break; 2653 2654 case 716: /* stxsdx */ 2655 op->reg = rd | ((word & 1) << 5); 2656 op->type = MKOP(STORE_VSX, 0, 8); 2657 op->element_size = 8; 2658 break; 2659 2660 case 780: /* lxvw4x */ 2661 op->reg = rd | ((word & 1) << 5); 2662 op->type = MKOP(LOAD_VSX, 0, 16); 2663 op->element_size = 4; 2664 break; 2665 2666 case 781: /* lxsibzx */ 2667 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2668 goto unknown_opcode; 2669 op->reg = rd | ((word & 1) << 5); 2670 op->type = MKOP(LOAD_VSX, 0, 1); 2671 op->element_size = 8; 2672 op->vsx_flags = VSX_CHECK_VEC; 2673 break; 2674 2675 case 812: /* lxvh8x */ 2676 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2677 goto unknown_opcode; 2678 op->reg = rd | ((word & 1) << 5); 2679 op->type = MKOP(LOAD_VSX, 0, 16); 2680 op->element_size = 2; 2681 op->vsx_flags = VSX_CHECK_VEC; 2682 break; 2683 2684 case 813: /* lxsihzx */ 2685 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2686 goto unknown_opcode; 2687 op->reg = rd | ((word & 1) << 5); 2688 op->type = MKOP(LOAD_VSX, 0, 2); 2689 op->element_size = 8; 2690 op->vsx_flags = VSX_CHECK_VEC; 2691 break; 2692 2693 case 844: /* lxvd2x */ 2694 op->reg = rd | ((word & 1) << 5); 2695 op->type = MKOP(LOAD_VSX, 0, 16); 2696 op->element_size = 8; 2697 break; 2698 2699 case 876: /* lxvb16x */ 2700 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2701 goto unknown_opcode; 2702 op->reg = rd | ((word & 1) << 5); 2703 op->type = MKOP(LOAD_VSX, 0, 16); 2704 op->element_size = 1; 2705 op->vsx_flags = VSX_CHECK_VEC; 2706 break; 2707 2708 case 908: /* stxvw4x */ 2709 op->reg = rd | ((word & 1) << 5); 2710 op->type = MKOP(STORE_VSX, 0, 16); 2711 op->element_size = 4; 2712 break; 2713 2714 case 909: /* stxsibx */ 2715 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2716 goto unknown_opcode; 2717 op->reg = rd | ((word & 1) << 5); 2718 op->type = MKOP(STORE_VSX, 0, 1); 2719 op->element_size = 8; 2720 op->vsx_flags = VSX_CHECK_VEC; 2721 break; 2722 2723 case 940: /* stxvh8x */ 2724 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2725 goto unknown_opcode; 2726 op->reg = rd | ((word & 1) << 5); 2727 op->type = MKOP(STORE_VSX, 0, 16); 2728 op->element_size = 2; 2729 op->vsx_flags = VSX_CHECK_VEC; 2730 break; 2731 2732 case 941: /* stxsihx */ 2733 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2734 goto unknown_opcode; 2735 op->reg = rd | ((word & 1) << 5); 2736 op->type = MKOP(STORE_VSX, 0, 2); 2737 op->element_size = 8; 2738 op->vsx_flags = VSX_CHECK_VEC; 2739 break; 2740 2741 case 972: /* stxvd2x */ 2742 op->reg = rd | ((word & 1) << 5); 2743 op->type = MKOP(STORE_VSX, 0, 16); 2744 op->element_size = 8; 2745 break; 2746 2747 case 1004: /* stxvb16x */ 2748 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2749 goto unknown_opcode; 2750 op->reg = rd | ((word & 1) << 5); 2751 op->type = MKOP(STORE_VSX, 0, 16); 2752 op->element_size = 1; 2753 op->vsx_flags = VSX_CHECK_VEC; 2754 break; 2755 2756 #endif /* CONFIG_VSX */ 2757 } 2758 break; 2759 2760 case 32: /* lwz */ 2761 case 33: /* lwzu */ 2762 op->type = MKOP(LOAD, u, 4); 2763 op->ea = dform_ea(word, regs); 2764 break; 2765 2766 case 34: /* lbz */ 2767 case 35: /* lbzu */ 2768 op->type = MKOP(LOAD, u, 1); 2769 op->ea = dform_ea(word, regs); 2770 break; 2771 2772 case 36: /* stw */ 2773 case 37: /* stwu */ 2774 op->type = MKOP(STORE, u, 4); 2775 op->ea = dform_ea(word, regs); 2776 break; 2777 2778 case 38: /* stb */ 2779 case 39: /* stbu */ 2780 op->type = MKOP(STORE, u, 1); 2781 op->ea = dform_ea(word, regs); 2782 break; 2783 2784 case 40: /* lhz */ 2785 case 41: /* lhzu */ 2786 op->type = MKOP(LOAD, u, 2); 2787 op->ea = dform_ea(word, regs); 2788 break; 2789 2790 case 42: /* lha */ 2791 case 43: /* lhau */ 2792 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2793 op->ea = dform_ea(word, regs); 2794 break; 2795 2796 case 44: /* sth */ 2797 case 45: /* sthu */ 2798 op->type = MKOP(STORE, u, 2); 2799 op->ea = dform_ea(word, regs); 2800 break; 2801 2802 case 46: /* lmw */ 2803 if (ra >= rd) 2804 break; /* invalid form, ra in range to load */ 2805 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); 2806 op->ea = dform_ea(word, regs); 2807 break; 2808 2809 case 47: /* stmw */ 2810 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); 2811 op->ea = dform_ea(word, regs); 2812 break; 2813 2814 #ifdef CONFIG_PPC_FPU 2815 case 48: /* lfs */ 2816 case 49: /* lfsu */ 2817 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2818 op->ea = dform_ea(word, regs); 2819 break; 2820 2821 case 50: /* lfd */ 2822 case 51: /* lfdu */ 2823 op->type = MKOP(LOAD_FP, u, 8); 2824 op->ea = dform_ea(word, regs); 2825 break; 2826 2827 case 52: /* stfs */ 2828 case 53: /* stfsu */ 2829 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2830 op->ea = dform_ea(word, regs); 2831 break; 2832 2833 case 54: /* stfd */ 2834 case 55: /* stfdu */ 2835 op->type = MKOP(STORE_FP, u, 8); 2836 op->ea = dform_ea(word, regs); 2837 break; 2838 #endif 2839 2840 #ifdef __powerpc64__ 2841 case 56: /* lq */ 2842 if (!((rd & 1) || (rd == ra))) 2843 op->type = MKOP(LOAD, 0, 16); 2844 op->ea = dqform_ea(word, regs); 2845 break; 2846 #endif 2847 2848 #ifdef CONFIG_VSX 2849 case 57: /* lfdp, lxsd, lxssp */ 2850 op->ea = dsform_ea(word, regs); 2851 switch (word & 3) { 2852 case 0: /* lfdp */ 2853 if (rd & 1) 2854 break; /* reg must be even */ 2855 op->type = MKOP(LOAD_FP, 0, 16); 2856 break; 2857 case 2: /* lxsd */ 2858 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2859 goto unknown_opcode; 2860 op->reg = rd + 32; 2861 op->type = MKOP(LOAD_VSX, 0, 8); 2862 op->element_size = 8; 2863 op->vsx_flags = VSX_CHECK_VEC; 2864 break; 2865 case 3: /* lxssp */ 2866 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2867 goto unknown_opcode; 2868 op->reg = rd + 32; 2869 op->type = MKOP(LOAD_VSX, 0, 4); 2870 op->element_size = 8; 2871 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2872 break; 2873 } 2874 break; 2875 #endif /* CONFIG_VSX */ 2876 2877 #ifdef __powerpc64__ 2878 case 58: /* ld[u], lwa */ 2879 op->ea = dsform_ea(word, regs); 2880 switch (word & 3) { 2881 case 0: /* ld */ 2882 op->type = MKOP(LOAD, 0, 8); 2883 break; 2884 case 1: /* ldu */ 2885 op->type = MKOP(LOAD, UPDATE, 8); 2886 break; 2887 case 2: /* lwa */ 2888 op->type = MKOP(LOAD, SIGNEXT, 4); 2889 break; 2890 } 2891 break; 2892 #endif 2893 2894 #ifdef CONFIG_VSX 2895 case 6: 2896 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2897 goto unknown_opcode; 2898 op->ea = dqform_ea(word, regs); 2899 op->reg = VSX_REGISTER_XTP(rd); 2900 op->element_size = 32; 2901 switch (word & 0xf) { 2902 case 0: /* lxvp */ 2903 op->type = MKOP(LOAD_VSX, 0, 32); 2904 break; 2905 case 1: /* stxvp */ 2906 op->type = MKOP(STORE_VSX, 0, 32); 2907 break; 2908 } 2909 break; 2910 2911 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ 2912 switch (word & 7) { 2913 case 0: /* stfdp with LSB of DS field = 0 */ 2914 case 4: /* stfdp with LSB of DS field = 1 */ 2915 op->ea = dsform_ea(word, regs); 2916 op->type = MKOP(STORE_FP, 0, 16); 2917 break; 2918 2919 case 1: /* lxv */ 2920 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2921 goto unknown_opcode; 2922 op->ea = dqform_ea(word, regs); 2923 if (word & 8) 2924 op->reg = rd + 32; 2925 op->type = MKOP(LOAD_VSX, 0, 16); 2926 op->element_size = 16; 2927 op->vsx_flags = VSX_CHECK_VEC; 2928 break; 2929 2930 case 2: /* stxsd with LSB of DS field = 0 */ 2931 case 6: /* stxsd with LSB of DS field = 1 */ 2932 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2933 goto unknown_opcode; 2934 op->ea = dsform_ea(word, regs); 2935 op->reg = rd + 32; 2936 op->type = MKOP(STORE_VSX, 0, 8); 2937 op->element_size = 8; 2938 op->vsx_flags = VSX_CHECK_VEC; 2939 break; 2940 2941 case 3: /* stxssp with LSB of DS field = 0 */ 2942 case 7: /* stxssp with LSB of DS field = 1 */ 2943 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2944 goto unknown_opcode; 2945 op->ea = dsform_ea(word, regs); 2946 op->reg = rd + 32; 2947 op->type = MKOP(STORE_VSX, 0, 4); 2948 op->element_size = 8; 2949 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2950 break; 2951 2952 case 5: /* stxv */ 2953 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2954 goto unknown_opcode; 2955 op->ea = dqform_ea(word, regs); 2956 if (word & 8) 2957 op->reg = rd + 32; 2958 op->type = MKOP(STORE_VSX, 0, 16); 2959 op->element_size = 16; 2960 op->vsx_flags = VSX_CHECK_VEC; 2961 break; 2962 } 2963 break; 2964 #endif /* CONFIG_VSX */ 2965 2966 #ifdef __powerpc64__ 2967 case 62: /* std[u] */ 2968 op->ea = dsform_ea(word, regs); 2969 switch (word & 3) { 2970 case 0: /* std */ 2971 op->type = MKOP(STORE, 0, 8); 2972 break; 2973 case 1: /* stdu */ 2974 op->type = MKOP(STORE, UPDATE, 8); 2975 break; 2976 case 2: /* stq */ 2977 if (!(rd & 1)) 2978 op->type = MKOP(STORE, 0, 16); 2979 break; 2980 } 2981 break; 2982 case 1: /* Prefixed instructions */ 2983 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2984 goto unknown_opcode; 2985 2986 prefix_r = GET_PREFIX_R(word); 2987 ra = GET_PREFIX_RA(suffix); 2988 op->update_reg = ra; 2989 rd = (suffix >> 21) & 0x1f; 2990 op->reg = rd; 2991 op->val = regs->gpr[rd]; 2992 2993 suffixopcode = get_op(suffix); 2994 prefixtype = (word >> 24) & 0x3; 2995 switch (prefixtype) { 2996 case 0: /* Type 00 Eight-Byte Load/Store */ 2997 if (prefix_r && ra) 2998 break; 2999 op->ea = mlsd_8lsd_ea(word, suffix, regs); 3000 switch (suffixopcode) { 3001 case 41: /* plwa */ 3002 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); 3003 break; 3004 #ifdef CONFIG_VSX 3005 case 42: /* plxsd */ 3006 op->reg = rd + 32; 3007 op->type = MKOP(LOAD_VSX, PREFIXED, 8); 3008 op->element_size = 8; 3009 op->vsx_flags = VSX_CHECK_VEC; 3010 break; 3011 case 43: /* plxssp */ 3012 op->reg = rd + 32; 3013 op->type = MKOP(LOAD_VSX, PREFIXED, 4); 3014 op->element_size = 8; 3015 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3016 break; 3017 case 46: /* pstxsd */ 3018 op->reg = rd + 32; 3019 op->type = MKOP(STORE_VSX, PREFIXED, 8); 3020 op->element_size = 8; 3021 op->vsx_flags = VSX_CHECK_VEC; 3022 break; 3023 case 47: /* pstxssp */ 3024 op->reg = rd + 32; 3025 op->type = MKOP(STORE_VSX, PREFIXED, 4); 3026 op->element_size = 8; 3027 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3028 break; 3029 case 51: /* plxv1 */ 3030 op->reg += 32; 3031 fallthrough; 3032 case 50: /* plxv0 */ 3033 op->type = MKOP(LOAD_VSX, PREFIXED, 16); 3034 op->element_size = 16; 3035 op->vsx_flags = VSX_CHECK_VEC; 3036 break; 3037 case 55: /* pstxv1 */ 3038 op->reg = rd + 32; 3039 fallthrough; 3040 case 54: /* pstxv0 */ 3041 op->type = MKOP(STORE_VSX, PREFIXED, 16); 3042 op->element_size = 16; 3043 op->vsx_flags = VSX_CHECK_VEC; 3044 break; 3045 #endif /* CONFIG_VSX */ 3046 case 56: /* plq */ 3047 op->type = MKOP(LOAD, PREFIXED, 16); 3048 break; 3049 case 57: /* pld */ 3050 op->type = MKOP(LOAD, PREFIXED, 8); 3051 break; 3052 #ifdef CONFIG_VSX 3053 case 58: /* plxvp */ 3054 op->reg = VSX_REGISTER_XTP(rd); 3055 op->type = MKOP(LOAD_VSX, PREFIXED, 32); 3056 op->element_size = 32; 3057 break; 3058 #endif /* CONFIG_VSX */ 3059 case 60: /* pstq */ 3060 op->type = MKOP(STORE, PREFIXED, 16); 3061 break; 3062 case 61: /* pstd */ 3063 op->type = MKOP(STORE, PREFIXED, 8); 3064 break; 3065 #ifdef CONFIG_VSX 3066 case 62: /* pstxvp */ 3067 op->reg = VSX_REGISTER_XTP(rd); 3068 op->type = MKOP(STORE_VSX, PREFIXED, 32); 3069 op->element_size = 32; 3070 break; 3071 #endif /* CONFIG_VSX */ 3072 } 3073 break; 3074 case 1: /* Type 01 Eight-Byte Register-to-Register */ 3075 break; 3076 case 2: /* Type 10 Modified Load/Store */ 3077 if (prefix_r && ra) 3078 break; 3079 op->ea = mlsd_8lsd_ea(word, suffix, regs); 3080 switch (suffixopcode) { 3081 case 32: /* plwz */ 3082 op->type = MKOP(LOAD, PREFIXED, 4); 3083 break; 3084 case 34: /* plbz */ 3085 op->type = MKOP(LOAD, PREFIXED, 1); 3086 break; 3087 case 36: /* pstw */ 3088 op->type = MKOP(STORE, PREFIXED, 4); 3089 break; 3090 case 38: /* pstb */ 3091 op->type = MKOP(STORE, PREFIXED, 1); 3092 break; 3093 case 40: /* plhz */ 3094 op->type = MKOP(LOAD, PREFIXED, 2); 3095 break; 3096 case 42: /* plha */ 3097 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); 3098 break; 3099 case 44: /* psth */ 3100 op->type = MKOP(STORE, PREFIXED, 2); 3101 break; 3102 case 48: /* plfs */ 3103 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); 3104 break; 3105 case 50: /* plfd */ 3106 op->type = MKOP(LOAD_FP, PREFIXED, 8); 3107 break; 3108 case 52: /* pstfs */ 3109 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); 3110 break; 3111 case 54: /* pstfd */ 3112 op->type = MKOP(STORE_FP, PREFIXED, 8); 3113 break; 3114 } 3115 break; 3116 case 3: /* Type 11 Modified Register-to-Register */ 3117 break; 3118 } 3119 #endif /* __powerpc64__ */ 3120 3121 } 3122 3123 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { 3124 switch (GETTYPE(op->type)) { 3125 case LOAD: 3126 if (ra == rd) 3127 goto unknown_opcode; 3128 fallthrough; 3129 case STORE: 3130 case LOAD_FP: 3131 case STORE_FP: 3132 if (ra == 0) 3133 goto unknown_opcode; 3134 } 3135 } 3136 3137 #ifdef CONFIG_VSX 3138 if ((GETTYPE(op->type) == LOAD_VSX || 3139 GETTYPE(op->type) == STORE_VSX) && 3140 !cpu_has_feature(CPU_FTR_VSX)) { 3141 return -1; 3142 } 3143 #endif /* CONFIG_VSX */ 3144 3145 return 0; 3146 3147 unknown_opcode: 3148 op->type = UNKNOWN; 3149 return 0; 3150 3151 logical_done: 3152 if (word & 1) 3153 set_cr0(regs, op); 3154 logical_done_nocc: 3155 op->reg = ra; 3156 op->type |= SETREG; 3157 return 1; 3158 3159 arith_done: 3160 if (word & 1) 3161 set_cr0(regs, op); 3162 compute_done: 3163 op->reg = rd; 3164 op->type |= SETREG; 3165 return 1; 3166 3167 priv: 3168 op->type = INTERRUPT | 0x700; 3169 op->val = SRR1_PROGPRIV; 3170 return 0; 3171 3172 trap: 3173 op->type = INTERRUPT | 0x700; 3174 op->val = SRR1_PROGTRAP; 3175 return 0; 3176 } 3177 EXPORT_SYMBOL_GPL(analyse_instr); 3178 NOKPROBE_SYMBOL(analyse_instr); 3179 3180 /* 3181 * For PPC32 we always use stwu with r1 to change the stack pointer. 3182 * So this emulated store may corrupt the exception frame, now we 3183 * have to provide the exception frame trampoline, which is pushed 3184 * below the kprobed function stack. So we only update gpr[1] but 3185 * don't emulate the real store operation. We will do real store 3186 * operation safely in exception return code by checking this flag. 3187 */ 3188 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) 3189 { 3190 /* 3191 * Check if we already set since that means we'll 3192 * lose the previous value. 3193 */ 3194 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); 3195 set_thread_flag(TIF_EMULATE_STACK_STORE); 3196 return 0; 3197 } 3198 3199 static nokprobe_inline void do_signext(unsigned long *valp, int size) 3200 { 3201 switch (size) { 3202 case 2: 3203 *valp = (signed short) *valp; 3204 break; 3205 case 4: 3206 *valp = (signed int) *valp; 3207 break; 3208 } 3209 } 3210 3211 static nokprobe_inline void do_byterev(unsigned long *valp, int size) 3212 { 3213 switch (size) { 3214 case 2: 3215 *valp = byterev_2(*valp); 3216 break; 3217 case 4: 3218 *valp = byterev_4(*valp); 3219 break; 3220 #ifdef __powerpc64__ 3221 case 8: 3222 *valp = byterev_8(*valp); 3223 break; 3224 #endif 3225 } 3226 } 3227 3228 /* 3229 * Emulate an instruction that can be executed just by updating 3230 * fields in *regs. 3231 */ 3232 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) 3233 { 3234 unsigned long next_pc; 3235 3236 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); 3237 switch (GETTYPE(op->type)) { 3238 case COMPUTE: 3239 if (op->type & SETREG) 3240 regs->gpr[op->reg] = op->val; 3241 if (op->type & SETCC) 3242 regs->ccr = op->ccval; 3243 if (op->type & SETXER) 3244 regs->xer = op->xerval; 3245 break; 3246 3247 case BRANCH: 3248 if (op->type & SETLK) 3249 regs->link = next_pc; 3250 if (op->type & BRTAKEN) 3251 next_pc = op->val; 3252 if (op->type & DECCTR) 3253 --regs->ctr; 3254 break; 3255 3256 case BARRIER: 3257 switch (op->type & BARRIER_MASK) { 3258 case BARRIER_SYNC: 3259 mb(); 3260 break; 3261 case BARRIER_ISYNC: 3262 isync(); 3263 break; 3264 case BARRIER_EIEIO: 3265 eieio(); 3266 break; 3267 case BARRIER_LWSYNC: 3268 asm volatile("lwsync" : : : "memory"); 3269 break; 3270 case BARRIER_PTESYNC: 3271 asm volatile("ptesync" : : : "memory"); 3272 break; 3273 } 3274 break; 3275 3276 case MFSPR: 3277 switch (op->spr) { 3278 case SPRN_XER: 3279 regs->gpr[op->reg] = regs->xer & 0xffffffffUL; 3280 break; 3281 case SPRN_LR: 3282 regs->gpr[op->reg] = regs->link; 3283 break; 3284 case SPRN_CTR: 3285 regs->gpr[op->reg] = regs->ctr; 3286 break; 3287 default: 3288 WARN_ON_ONCE(1); 3289 } 3290 break; 3291 3292 case MTSPR: 3293 switch (op->spr) { 3294 case SPRN_XER: 3295 regs->xer = op->val & 0xffffffffUL; 3296 break; 3297 case SPRN_LR: 3298 regs->link = op->val; 3299 break; 3300 case SPRN_CTR: 3301 regs->ctr = op->val; 3302 break; 3303 default: 3304 WARN_ON_ONCE(1); 3305 } 3306 break; 3307 3308 default: 3309 WARN_ON_ONCE(1); 3310 } 3311 regs_set_return_ip(regs, next_pc); 3312 } 3313 NOKPROBE_SYMBOL(emulate_update_regs); 3314 3315 /* 3316 * Emulate a previously-analysed load or store instruction. 3317 * Return values are: 3318 * 0 = instruction emulated successfully 3319 * -EFAULT = address out of range or access faulted (regs->dar 3320 * contains the faulting address) 3321 * -EACCES = misaligned access, instruction requires alignment 3322 * -EINVAL = unknown operation in *op 3323 */ 3324 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) 3325 { 3326 int err, size, type; 3327 int i, rd, nb; 3328 unsigned int cr; 3329 unsigned long val; 3330 unsigned long ea; 3331 bool cross_endian; 3332 3333 err = 0; 3334 size = GETSIZE(op->type); 3335 type = GETTYPE(op->type); 3336 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 3337 ea = truncate_if_32bit(regs->msr, op->ea); 3338 3339 switch (type) { 3340 case LARX: 3341 if (ea & (size - 1)) 3342 return -EACCES; /* can't handle misaligned */ 3343 if (!address_ok(regs, ea, size)) 3344 return -EFAULT; 3345 err = 0; 3346 val = 0; 3347 switch (size) { 3348 #ifdef __powerpc64__ 3349 case 1: 3350 __get_user_asmx(val, ea, err, "lbarx"); 3351 break; 3352 case 2: 3353 __get_user_asmx(val, ea, err, "lharx"); 3354 break; 3355 #endif 3356 case 4: 3357 __get_user_asmx(val, ea, err, "lwarx"); 3358 break; 3359 #ifdef __powerpc64__ 3360 case 8: 3361 __get_user_asmx(val, ea, err, "ldarx"); 3362 break; 3363 case 16: 3364 err = do_lqarx(ea, ®s->gpr[op->reg]); 3365 break; 3366 #endif 3367 default: 3368 return -EINVAL; 3369 } 3370 if (err) { 3371 regs->dar = ea; 3372 break; 3373 } 3374 if (size < 16) 3375 regs->gpr[op->reg] = val; 3376 break; 3377 3378 case STCX: 3379 if (ea & (size - 1)) 3380 return -EACCES; /* can't handle misaligned */ 3381 if (!address_ok(regs, ea, size)) 3382 return -EFAULT; 3383 err = 0; 3384 switch (size) { 3385 #ifdef __powerpc64__ 3386 case 1: 3387 __put_user_asmx(op->val, ea, err, "stbcx.", cr); 3388 break; 3389 case 2: 3390 __put_user_asmx(op->val, ea, err, "stbcx.", cr); 3391 break; 3392 #endif 3393 case 4: 3394 __put_user_asmx(op->val, ea, err, "stwcx.", cr); 3395 break; 3396 #ifdef __powerpc64__ 3397 case 8: 3398 __put_user_asmx(op->val, ea, err, "stdcx.", cr); 3399 break; 3400 case 16: 3401 err = do_stqcx(ea, regs->gpr[op->reg], 3402 regs->gpr[op->reg + 1], &cr); 3403 break; 3404 #endif 3405 default: 3406 return -EINVAL; 3407 } 3408 if (!err) 3409 regs->ccr = (regs->ccr & 0x0fffffff) | 3410 (cr & 0xe0000000) | 3411 ((regs->xer >> 3) & 0x10000000); 3412 else 3413 regs->dar = ea; 3414 break; 3415 3416 case LOAD: 3417 #ifdef __powerpc64__ 3418 if (size == 16) { 3419 err = emulate_lq(regs, ea, op->reg, cross_endian); 3420 break; 3421 } 3422 #endif 3423 err = read_mem(®s->gpr[op->reg], ea, size, regs); 3424 if (!err) { 3425 if (op->type & SIGNEXT) 3426 do_signext(®s->gpr[op->reg], size); 3427 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV)) 3428 do_byterev(®s->gpr[op->reg], size); 3429 } 3430 break; 3431 3432 #ifdef CONFIG_PPC_FPU 3433 case LOAD_FP: 3434 /* 3435 * If the instruction is in userspace, we can emulate it even 3436 * if the VMX state is not live, because we have the state 3437 * stored in the thread_struct. If the instruction is in 3438 * the kernel, we must not touch the state in the thread_struct. 3439 */ 3440 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3441 return 0; 3442 err = do_fp_load(op, ea, regs, cross_endian); 3443 break; 3444 #endif 3445 #ifdef CONFIG_ALTIVEC 3446 case LOAD_VMX: 3447 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3448 return 0; 3449 err = do_vec_load(op->reg, ea, size, regs, cross_endian); 3450 break; 3451 #endif 3452 #ifdef CONFIG_VSX 3453 case LOAD_VSX: { 3454 unsigned long msrbit = MSR_VSX; 3455 3456 /* 3457 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3458 * when the target of the instruction is a vector register. 3459 */ 3460 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3461 msrbit = MSR_VEC; 3462 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3463 return 0; 3464 err = do_vsx_load(op, ea, regs, cross_endian); 3465 break; 3466 } 3467 #endif 3468 case LOAD_MULTI: 3469 if (!address_ok(regs, ea, size)) 3470 return -EFAULT; 3471 rd = op->reg; 3472 for (i = 0; i < size; i += 4) { 3473 unsigned int v32 = 0; 3474 3475 nb = size - i; 3476 if (nb > 4) 3477 nb = 4; 3478 err = copy_mem_in((u8 *) &v32, ea, nb, regs); 3479 if (err) 3480 break; 3481 if (unlikely(cross_endian)) 3482 v32 = byterev_4(v32); 3483 regs->gpr[rd] = v32; 3484 ea += 4; 3485 /* reg number wraps from 31 to 0 for lsw[ix] */ 3486 rd = (rd + 1) & 0x1f; 3487 } 3488 break; 3489 3490 case STORE: 3491 #ifdef __powerpc64__ 3492 if (size == 16) { 3493 err = emulate_stq(regs, ea, op->reg, cross_endian); 3494 break; 3495 } 3496 #endif 3497 if ((op->type & UPDATE) && size == sizeof(long) && 3498 op->reg == 1 && op->update_reg == 1 && 3499 !(regs->msr & MSR_PR) && 3500 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { 3501 err = handle_stack_update(ea, regs); 3502 break; 3503 } 3504 if (unlikely(cross_endian)) 3505 do_byterev(&op->val, size); 3506 err = write_mem(op->val, ea, size, regs); 3507 break; 3508 3509 #ifdef CONFIG_PPC_FPU 3510 case STORE_FP: 3511 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3512 return 0; 3513 err = do_fp_store(op, ea, regs, cross_endian); 3514 break; 3515 #endif 3516 #ifdef CONFIG_ALTIVEC 3517 case STORE_VMX: 3518 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3519 return 0; 3520 err = do_vec_store(op->reg, ea, size, regs, cross_endian); 3521 break; 3522 #endif 3523 #ifdef CONFIG_VSX 3524 case STORE_VSX: { 3525 unsigned long msrbit = MSR_VSX; 3526 3527 /* 3528 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3529 * when the target of the instruction is a vector register. 3530 */ 3531 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3532 msrbit = MSR_VEC; 3533 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3534 return 0; 3535 err = do_vsx_store(op, ea, regs, cross_endian); 3536 break; 3537 } 3538 #endif 3539 case STORE_MULTI: 3540 if (!address_ok(regs, ea, size)) 3541 return -EFAULT; 3542 rd = op->reg; 3543 for (i = 0; i < size; i += 4) { 3544 unsigned int v32 = regs->gpr[rd]; 3545 3546 nb = size - i; 3547 if (nb > 4) 3548 nb = 4; 3549 if (unlikely(cross_endian)) 3550 v32 = byterev_4(v32); 3551 err = copy_mem_out((u8 *) &v32, ea, nb, regs); 3552 if (err) 3553 break; 3554 ea += 4; 3555 /* reg number wraps from 31 to 0 for stsw[ix] */ 3556 rd = (rd + 1) & 0x1f; 3557 } 3558 break; 3559 3560 default: 3561 return -EINVAL; 3562 } 3563 3564 if (err) 3565 return err; 3566 3567 if (op->type & UPDATE) 3568 regs->gpr[op->update_reg] = op->ea; 3569 3570 return 0; 3571 } 3572 NOKPROBE_SYMBOL(emulate_loadstore); 3573 3574 /* 3575 * Emulate instructions that cause a transfer of control, 3576 * loads and stores, and a few other instructions. 3577 * Returns 1 if the step was emulated, 0 if not, 3578 * or -1 if the instruction is one that should not be stepped, 3579 * such as an rfid, or a mtmsrd that would clear MSR_RI. 3580 */ 3581 int emulate_step(struct pt_regs *regs, ppc_inst_t instr) 3582 { 3583 struct instruction_op op; 3584 int r, err, type; 3585 unsigned long val; 3586 unsigned long ea; 3587 3588 r = analyse_instr(&op, regs, instr); 3589 if (r < 0) 3590 return r; 3591 if (r > 0) { 3592 emulate_update_regs(regs, &op); 3593 return 1; 3594 } 3595 3596 err = 0; 3597 type = GETTYPE(op.type); 3598 3599 if (OP_IS_LOAD_STORE(type)) { 3600 err = emulate_loadstore(regs, &op); 3601 if (err) 3602 return 0; 3603 goto instr_done; 3604 } 3605 3606 switch (type) { 3607 case CACHEOP: 3608 ea = truncate_if_32bit(regs->msr, op.ea); 3609 if (!address_ok(regs, ea, 8)) 3610 return 0; 3611 switch (op.type & CACHEOP_MASK) { 3612 case DCBST: 3613 __cacheop_user_asmx(ea, err, "dcbst"); 3614 break; 3615 case DCBF: 3616 __cacheop_user_asmx(ea, err, "dcbf"); 3617 break; 3618 case DCBTST: 3619 if (op.reg == 0) 3620 prefetchw((void *) ea); 3621 break; 3622 case DCBT: 3623 if (op.reg == 0) 3624 prefetch((void *) ea); 3625 break; 3626 case ICBI: 3627 __cacheop_user_asmx(ea, err, "icbi"); 3628 break; 3629 case DCBZ: 3630 err = emulate_dcbz(ea, regs); 3631 break; 3632 } 3633 if (err) { 3634 regs->dar = ea; 3635 return 0; 3636 } 3637 goto instr_done; 3638 3639 case MFMSR: 3640 regs->gpr[op.reg] = regs->msr & MSR_MASK; 3641 goto instr_done; 3642 3643 case MTMSR: 3644 val = regs->gpr[op.reg]; 3645 if ((val & MSR_RI) == 0) 3646 /* can't step mtmsr[d] that would clear MSR_RI */ 3647 return -1; 3648 /* here op.val is the mask of bits to change */ 3649 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); 3650 goto instr_done; 3651 3652 #ifdef CONFIG_PPC64 3653 case SYSCALL: /* sc */ 3654 /* 3655 * N.B. this uses knowledge about how the syscall 3656 * entry code works. If that is changed, this will 3657 * need to be changed also. 3658 */ 3659 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && 3660 cpu_has_feature(CPU_FTR_REAL_LE) && 3661 regs->gpr[0] == 0x1ebe) { 3662 regs_set_return_msr(regs, regs->msr ^ MSR_LE); 3663 goto instr_done; 3664 } 3665 regs->gpr[9] = regs->gpr[13]; 3666 regs->gpr[10] = MSR_KERNEL; 3667 regs->gpr[11] = regs->nip + 4; 3668 regs->gpr[12] = regs->msr & MSR_MASK; 3669 regs->gpr[13] = (unsigned long) get_paca(); 3670 regs_set_return_ip(regs, (unsigned long) &system_call_common); 3671 regs_set_return_msr(regs, MSR_KERNEL); 3672 return 1; 3673 3674 #ifdef CONFIG_PPC_BOOK3S_64 3675 case SYSCALL_VECTORED_0: /* scv 0 */ 3676 regs->gpr[9] = regs->gpr[13]; 3677 regs->gpr[10] = MSR_KERNEL; 3678 regs->gpr[11] = regs->nip + 4; 3679 regs->gpr[12] = regs->msr & MSR_MASK; 3680 regs->gpr[13] = (unsigned long) get_paca(); 3681 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate); 3682 regs_set_return_msr(regs, MSR_KERNEL); 3683 return 1; 3684 #endif 3685 3686 case RFI: 3687 return -1; 3688 #endif 3689 } 3690 return 0; 3691 3692 instr_done: 3693 regs_set_return_ip(regs, 3694 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); 3695 return 1; 3696 } 3697 NOKPROBE_SYMBOL(emulate_step); 3698