1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Single-step support. 4 * 5 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM 6 */ 7 #include <linux/kernel.h> 8 #include <linux/kprobes.h> 9 #include <linux/ptrace.h> 10 #include <linux/prefetch.h> 11 #include <asm/sstep.h> 12 #include <asm/processor.h> 13 #include <linux/uaccess.h> 14 #include <asm/cpu_has_feature.h> 15 #include <asm/cputable.h> 16 #include <asm/disassemble.h> 17 18 extern char system_call_common[]; 19 extern char system_call_vectored_emulate[]; 20 21 #ifdef CONFIG_PPC64 22 /* Bits in SRR1 that are copied from MSR */ 23 #define MSR_MASK 0xffffffff87c0ffffUL 24 #else 25 #define MSR_MASK 0x87c0ffff 26 #endif 27 28 /* Bits in XER */ 29 #define XER_SO 0x80000000U 30 #define XER_OV 0x40000000U 31 #define XER_CA 0x20000000U 32 #define XER_OV32 0x00080000U 33 #define XER_CA32 0x00040000U 34 35 #ifdef CONFIG_VSX 36 #define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe)) 37 #endif 38 39 #ifdef CONFIG_PPC_FPU 40 /* 41 * Functions in ldstfp.S 42 */ 43 extern void get_fpr(int rn, double *p); 44 extern void put_fpr(int rn, const double *p); 45 extern void get_vr(int rn, __vector128 *p); 46 extern void put_vr(int rn, __vector128 *p); 47 extern void load_vsrn(int vsr, const void *p); 48 extern void store_vsrn(int vsr, void *p); 49 extern void conv_sp_to_dp(const float *sp, double *dp); 50 extern void conv_dp_to_sp(const double *dp, float *sp); 51 #endif 52 53 #ifdef __powerpc64__ 54 /* 55 * Functions in quad.S 56 */ 57 extern int do_lq(unsigned long ea, unsigned long *regs); 58 extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1); 59 extern int do_lqarx(unsigned long ea, unsigned long *regs); 60 extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, 61 unsigned int *crp); 62 #endif 63 64 #ifdef __LITTLE_ENDIAN__ 65 #define IS_LE 1 66 #define IS_BE 0 67 #else 68 #define IS_LE 0 69 #define IS_BE 1 70 #endif 71 72 /* 73 * Emulate the truncation of 64 bit values in 32-bit mode. 74 */ 75 static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, 76 unsigned long val) 77 { 78 if ((msr & MSR_64BIT) == 0) 79 val &= 0xffffffffUL; 80 return val; 81 } 82 83 /* 84 * Determine whether a conditional branch instruction would branch. 85 */ 86 static nokprobe_inline int branch_taken(unsigned int instr, 87 const struct pt_regs *regs, 88 struct instruction_op *op) 89 { 90 unsigned int bo = (instr >> 21) & 0x1f; 91 unsigned int bi; 92 93 if ((bo & 4) == 0) { 94 /* decrement counter */ 95 op->type |= DECCTR; 96 if (((bo >> 1) & 1) ^ (regs->ctr == 1)) 97 return 0; 98 } 99 if ((bo & 0x10) == 0) { 100 /* check bit from CR */ 101 bi = (instr >> 16) & 0x1f; 102 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) 103 return 0; 104 } 105 return 1; 106 } 107 108 static nokprobe_inline long address_ok(struct pt_regs *regs, 109 unsigned long ea, int nb) 110 { 111 if (!user_mode(regs)) 112 return 1; 113 if (access_ok((void __user *)ea, nb)) 114 return 1; 115 if (access_ok((void __user *)ea, 1)) 116 /* Access overlaps the end of the user region */ 117 regs->dar = TASK_SIZE_MAX - 1; 118 else 119 regs->dar = ea; 120 return 0; 121 } 122 123 /* 124 * Calculate effective address for a D-form instruction 125 */ 126 static nokprobe_inline unsigned long dform_ea(unsigned int instr, 127 const struct pt_regs *regs) 128 { 129 int ra; 130 unsigned long ea; 131 132 ra = (instr >> 16) & 0x1f; 133 ea = (signed short) instr; /* sign-extend */ 134 if (ra) 135 ea += regs->gpr[ra]; 136 137 return ea; 138 } 139 140 #ifdef __powerpc64__ 141 /* 142 * Calculate effective address for a DS-form instruction 143 */ 144 static nokprobe_inline unsigned long dsform_ea(unsigned int instr, 145 const struct pt_regs *regs) 146 { 147 int ra; 148 unsigned long ea; 149 150 ra = (instr >> 16) & 0x1f; 151 ea = (signed short) (instr & ~3); /* sign-extend */ 152 if (ra) 153 ea += regs->gpr[ra]; 154 155 return ea; 156 } 157 158 /* 159 * Calculate effective address for a DQ-form instruction 160 */ 161 static nokprobe_inline unsigned long dqform_ea(unsigned int instr, 162 const struct pt_regs *regs) 163 { 164 int ra; 165 unsigned long ea; 166 167 ra = (instr >> 16) & 0x1f; 168 ea = (signed short) (instr & ~0xf); /* sign-extend */ 169 if (ra) 170 ea += regs->gpr[ra]; 171 172 return ea; 173 } 174 #endif /* __powerpc64 */ 175 176 /* 177 * Calculate effective address for an X-form instruction 178 */ 179 static nokprobe_inline unsigned long xform_ea(unsigned int instr, 180 const struct pt_regs *regs) 181 { 182 int ra, rb; 183 unsigned long ea; 184 185 ra = (instr >> 16) & 0x1f; 186 rb = (instr >> 11) & 0x1f; 187 ea = regs->gpr[rb]; 188 if (ra) 189 ea += regs->gpr[ra]; 190 191 return ea; 192 } 193 194 /* 195 * Calculate effective address for a MLS:D-form / 8LS:D-form 196 * prefixed instruction 197 */ 198 static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, 199 unsigned int suffix, 200 const struct pt_regs *regs) 201 { 202 int ra, prefix_r; 203 unsigned int dd; 204 unsigned long ea, d0, d1, d; 205 206 prefix_r = GET_PREFIX_R(instr); 207 ra = GET_PREFIX_RA(suffix); 208 209 d0 = instr & 0x3ffff; 210 d1 = suffix & 0xffff; 211 d = (d0 << 16) | d1; 212 213 /* 214 * sign extend a 34 bit number 215 */ 216 dd = (unsigned int)(d >> 2); 217 ea = (signed int)dd; 218 ea = (ea << 2) | (d & 0x3); 219 220 if (!prefix_r && ra) 221 ea += regs->gpr[ra]; 222 else if (!prefix_r && !ra) 223 ; /* Leave ea as is */ 224 else if (prefix_r) 225 ea += regs->nip; 226 227 /* 228 * (prefix_r && ra) is an invalid form. Should already be 229 * checked for by caller! 230 */ 231 232 return ea; 233 } 234 235 /* 236 * Return the largest power of 2, not greater than sizeof(unsigned long), 237 * such that x is a multiple of it. 238 */ 239 static nokprobe_inline unsigned long max_align(unsigned long x) 240 { 241 x |= sizeof(unsigned long); 242 return x & -x; /* isolates rightmost bit */ 243 } 244 245 static nokprobe_inline unsigned long byterev_2(unsigned long x) 246 { 247 return ((x >> 8) & 0xff) | ((x & 0xff) << 8); 248 } 249 250 static nokprobe_inline unsigned long byterev_4(unsigned long x) 251 { 252 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) | 253 ((x & 0xff00) << 8) | ((x & 0xff) << 24); 254 } 255 256 #ifdef __powerpc64__ 257 static nokprobe_inline unsigned long byterev_8(unsigned long x) 258 { 259 return (byterev_4(x) << 32) | byterev_4(x >> 32); 260 } 261 #endif 262 263 static nokprobe_inline void do_byte_reverse(void *ptr, int nb) 264 { 265 switch (nb) { 266 case 2: 267 *(u16 *)ptr = byterev_2(*(u16 *)ptr); 268 break; 269 case 4: 270 *(u32 *)ptr = byterev_4(*(u32 *)ptr); 271 break; 272 #ifdef __powerpc64__ 273 case 8: 274 *(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr); 275 break; 276 case 16: { 277 unsigned long *up = (unsigned long *)ptr; 278 unsigned long tmp; 279 tmp = byterev_8(up[0]); 280 up[0] = byterev_8(up[1]); 281 up[1] = tmp; 282 break; 283 } 284 case 32: { 285 unsigned long *up = (unsigned long *)ptr; 286 unsigned long tmp; 287 288 tmp = byterev_8(up[0]); 289 up[0] = byterev_8(up[3]); 290 up[3] = tmp; 291 tmp = byterev_8(up[2]); 292 up[2] = byterev_8(up[1]); 293 up[1] = tmp; 294 break; 295 } 296 297 #endif 298 default: 299 WARN_ON_ONCE(1); 300 } 301 } 302 303 static __always_inline int 304 __read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 305 { 306 unsigned long x = 0; 307 308 switch (nb) { 309 case 1: 310 unsafe_get_user(x, (unsigned char __user *)ea, Efault); 311 break; 312 case 2: 313 unsafe_get_user(x, (unsigned short __user *)ea, Efault); 314 break; 315 case 4: 316 unsafe_get_user(x, (unsigned int __user *)ea, Efault); 317 break; 318 #ifdef __powerpc64__ 319 case 8: 320 unsafe_get_user(x, (unsigned long __user *)ea, Efault); 321 break; 322 #endif 323 } 324 *dest = x; 325 return 0; 326 327 Efault: 328 regs->dar = ea; 329 return -EFAULT; 330 } 331 332 static nokprobe_inline int 333 read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) 334 { 335 int err; 336 337 if (is_kernel_addr(ea)) 338 return __read_mem_aligned(dest, ea, nb, regs); 339 340 if (user_read_access_begin((void __user *)ea, nb)) { 341 err = __read_mem_aligned(dest, ea, nb, regs); 342 user_read_access_end(); 343 } else { 344 err = -EFAULT; 345 regs->dar = ea; 346 } 347 348 return err; 349 } 350 351 /* 352 * Copy from userspace to a buffer, using the largest possible 353 * aligned accesses, up to sizeof(long). 354 */ 355 static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 356 { 357 int c; 358 359 for (; nb > 0; nb -= c) { 360 c = max_align(ea); 361 if (c > nb) 362 c = max_align(nb); 363 switch (c) { 364 case 1: 365 unsafe_get_user(*dest, (u8 __user *)ea, Efault); 366 break; 367 case 2: 368 unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault); 369 break; 370 case 4: 371 unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault); 372 break; 373 #ifdef __powerpc64__ 374 case 8: 375 unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault); 376 break; 377 #endif 378 } 379 dest += c; 380 ea += c; 381 } 382 return 0; 383 384 Efault: 385 regs->dar = ea; 386 return -EFAULT; 387 } 388 389 static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 390 { 391 int err; 392 393 if (is_kernel_addr(ea)) 394 return __copy_mem_in(dest, ea, nb, regs); 395 396 if (user_read_access_begin((void __user *)ea, nb)) { 397 err = __copy_mem_in(dest, ea, nb, regs); 398 user_read_access_end(); 399 } else { 400 err = -EFAULT; 401 regs->dar = ea; 402 } 403 404 return err; 405 } 406 407 static nokprobe_inline int read_mem_unaligned(unsigned long *dest, 408 unsigned long ea, int nb, 409 struct pt_regs *regs) 410 { 411 union { 412 unsigned long ul; 413 u8 b[sizeof(unsigned long)]; 414 } u; 415 int i; 416 int err; 417 418 u.ul = 0; 419 i = IS_BE ? sizeof(unsigned long) - nb : 0; 420 err = copy_mem_in(&u.b[i], ea, nb, regs); 421 if (!err) 422 *dest = u.ul; 423 return err; 424 } 425 426 /* 427 * Read memory at address ea for nb bytes, return 0 for success 428 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 429 * If nb < sizeof(long), the result is right-justified on BE systems. 430 */ 431 static int read_mem(unsigned long *dest, unsigned long ea, int nb, 432 struct pt_regs *regs) 433 { 434 if (!address_ok(regs, ea, nb)) 435 return -EFAULT; 436 if ((ea & (nb - 1)) == 0) 437 return read_mem_aligned(dest, ea, nb, regs); 438 return read_mem_unaligned(dest, ea, nb, regs); 439 } 440 NOKPROBE_SYMBOL(read_mem); 441 442 static __always_inline int 443 __write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 444 { 445 switch (nb) { 446 case 1: 447 unsafe_put_user(val, (unsigned char __user *)ea, Efault); 448 break; 449 case 2: 450 unsafe_put_user(val, (unsigned short __user *)ea, Efault); 451 break; 452 case 4: 453 unsafe_put_user(val, (unsigned int __user *)ea, Efault); 454 break; 455 #ifdef __powerpc64__ 456 case 8: 457 unsafe_put_user(val, (unsigned long __user *)ea, Efault); 458 break; 459 #endif 460 } 461 return 0; 462 463 Efault: 464 regs->dar = ea; 465 return -EFAULT; 466 } 467 468 static nokprobe_inline int 469 write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) 470 { 471 int err; 472 473 if (is_kernel_addr(ea)) 474 return __write_mem_aligned(val, ea, nb, regs); 475 476 if (user_write_access_begin((void __user *)ea, nb)) { 477 err = __write_mem_aligned(val, ea, nb, regs); 478 user_write_access_end(); 479 } else { 480 err = -EFAULT; 481 regs->dar = ea; 482 } 483 484 return err; 485 } 486 487 /* 488 * Copy from a buffer to userspace, using the largest possible 489 * aligned accesses, up to sizeof(long). 490 */ 491 static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 492 { 493 int c; 494 495 for (; nb > 0; nb -= c) { 496 c = max_align(ea); 497 if (c > nb) 498 c = max_align(nb); 499 switch (c) { 500 case 1: 501 unsafe_put_user(*dest, (u8 __user *)ea, Efault); 502 break; 503 case 2: 504 unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault); 505 break; 506 case 4: 507 unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault); 508 break; 509 #ifdef __powerpc64__ 510 case 8: 511 unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault); 512 break; 513 #endif 514 } 515 dest += c; 516 ea += c; 517 } 518 return 0; 519 520 Efault: 521 regs->dar = ea; 522 return -EFAULT; 523 } 524 525 static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) 526 { 527 int err; 528 529 if (is_kernel_addr(ea)) 530 return __copy_mem_out(dest, ea, nb, regs); 531 532 if (user_write_access_begin((void __user *)ea, nb)) { 533 err = __copy_mem_out(dest, ea, nb, regs); 534 user_write_access_end(); 535 } else { 536 err = -EFAULT; 537 regs->dar = ea; 538 } 539 540 return err; 541 } 542 543 static nokprobe_inline int write_mem_unaligned(unsigned long val, 544 unsigned long ea, int nb, 545 struct pt_regs *regs) 546 { 547 union { 548 unsigned long ul; 549 u8 b[sizeof(unsigned long)]; 550 } u; 551 int i; 552 553 u.ul = val; 554 i = IS_BE ? sizeof(unsigned long) - nb : 0; 555 return copy_mem_out(&u.b[i], ea, nb, regs); 556 } 557 558 /* 559 * Write memory at address ea for nb bytes, return 0 for success 560 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8. 561 */ 562 static int write_mem(unsigned long val, unsigned long ea, int nb, 563 struct pt_regs *regs) 564 { 565 if (!address_ok(regs, ea, nb)) 566 return -EFAULT; 567 if ((ea & (nb - 1)) == 0) 568 return write_mem_aligned(val, ea, nb, regs); 569 return write_mem_unaligned(val, ea, nb, regs); 570 } 571 NOKPROBE_SYMBOL(write_mem); 572 573 #ifdef CONFIG_PPC_FPU 574 /* 575 * These access either the real FP register or the image in the 576 * thread_struct, depending on regs->msr & MSR_FP. 577 */ 578 static int do_fp_load(struct instruction_op *op, unsigned long ea, 579 struct pt_regs *regs, bool cross_endian) 580 { 581 int err, rn, nb; 582 union { 583 int i; 584 unsigned int u; 585 float f; 586 double d[2]; 587 unsigned long l[2]; 588 u8 b[2 * sizeof(double)]; 589 } u; 590 591 nb = GETSIZE(op->type); 592 if (!address_ok(regs, ea, nb)) 593 return -EFAULT; 594 rn = op->reg; 595 err = copy_mem_in(u.b, ea, nb, regs); 596 if (err) 597 return err; 598 if (unlikely(cross_endian)) { 599 do_byte_reverse(u.b, min(nb, 8)); 600 if (nb == 16) 601 do_byte_reverse(&u.b[8], 8); 602 } 603 preempt_disable(); 604 if (nb == 4) { 605 if (op->type & FPCONV) 606 conv_sp_to_dp(&u.f, &u.d[0]); 607 else if (op->type & SIGNEXT) 608 u.l[0] = u.i; 609 else 610 u.l[0] = u.u; 611 } 612 if (regs->msr & MSR_FP) 613 put_fpr(rn, &u.d[0]); 614 else 615 current->thread.TS_FPR(rn) = u.l[0]; 616 if (nb == 16) { 617 /* lfdp */ 618 rn |= 1; 619 if (regs->msr & MSR_FP) 620 put_fpr(rn, &u.d[1]); 621 else 622 current->thread.TS_FPR(rn) = u.l[1]; 623 } 624 preempt_enable(); 625 return 0; 626 } 627 NOKPROBE_SYMBOL(do_fp_load); 628 629 static int do_fp_store(struct instruction_op *op, unsigned long ea, 630 struct pt_regs *regs, bool cross_endian) 631 { 632 int rn, nb; 633 union { 634 unsigned int u; 635 float f; 636 double d[2]; 637 unsigned long l[2]; 638 u8 b[2 * sizeof(double)]; 639 } u; 640 641 nb = GETSIZE(op->type); 642 if (!address_ok(regs, ea, nb)) 643 return -EFAULT; 644 rn = op->reg; 645 preempt_disable(); 646 if (regs->msr & MSR_FP) 647 get_fpr(rn, &u.d[0]); 648 else 649 u.l[0] = current->thread.TS_FPR(rn); 650 if (nb == 4) { 651 if (op->type & FPCONV) 652 conv_dp_to_sp(&u.d[0], &u.f); 653 else 654 u.u = u.l[0]; 655 } 656 if (nb == 16) { 657 rn |= 1; 658 if (regs->msr & MSR_FP) 659 get_fpr(rn, &u.d[1]); 660 else 661 u.l[1] = current->thread.TS_FPR(rn); 662 } 663 preempt_enable(); 664 if (unlikely(cross_endian)) { 665 do_byte_reverse(u.b, min(nb, 8)); 666 if (nb == 16) 667 do_byte_reverse(&u.b[8], 8); 668 } 669 return copy_mem_out(u.b, ea, nb, regs); 670 } 671 NOKPROBE_SYMBOL(do_fp_store); 672 #endif 673 674 #ifdef CONFIG_ALTIVEC 675 /* For Altivec/VMX, no need to worry about alignment */ 676 static nokprobe_inline int do_vec_load(int rn, unsigned long ea, 677 int size, struct pt_regs *regs, 678 bool cross_endian) 679 { 680 int err; 681 union { 682 __vector128 v; 683 u8 b[sizeof(__vector128)]; 684 } u = {}; 685 686 if (!address_ok(regs, ea & ~0xfUL, 16)) 687 return -EFAULT; 688 /* align to multiple of size */ 689 ea &= ~(size - 1); 690 err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs); 691 if (err) 692 return err; 693 if (unlikely(cross_endian)) 694 do_byte_reverse(&u.b[ea & 0xf], size); 695 preempt_disable(); 696 if (regs->msr & MSR_VEC) 697 put_vr(rn, &u.v); 698 else 699 current->thread.vr_state.vr[rn] = u.v; 700 preempt_enable(); 701 return 0; 702 } 703 704 static nokprobe_inline int do_vec_store(int rn, unsigned long ea, 705 int size, struct pt_regs *regs, 706 bool cross_endian) 707 { 708 union { 709 __vector128 v; 710 u8 b[sizeof(__vector128)]; 711 } u; 712 713 if (!address_ok(regs, ea & ~0xfUL, 16)) 714 return -EFAULT; 715 /* align to multiple of size */ 716 ea &= ~(size - 1); 717 718 preempt_disable(); 719 if (regs->msr & MSR_VEC) 720 get_vr(rn, &u.v); 721 else 722 u.v = current->thread.vr_state.vr[rn]; 723 preempt_enable(); 724 if (unlikely(cross_endian)) 725 do_byte_reverse(&u.b[ea & 0xf], size); 726 return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); 727 } 728 #endif /* CONFIG_ALTIVEC */ 729 730 #ifdef __powerpc64__ 731 static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea, 732 int reg, bool cross_endian) 733 { 734 int err; 735 736 if (!address_ok(regs, ea, 16)) 737 return -EFAULT; 738 /* if aligned, should be atomic */ 739 if ((ea & 0xf) == 0) { 740 err = do_lq(ea, ®s->gpr[reg]); 741 } else { 742 err = read_mem(®s->gpr[reg + IS_LE], ea, 8, regs); 743 if (!err) 744 err = read_mem(®s->gpr[reg + IS_BE], ea + 8, 8, regs); 745 } 746 if (!err && unlikely(cross_endian)) 747 do_byte_reverse(®s->gpr[reg], 16); 748 return err; 749 } 750 751 static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, 752 int reg, bool cross_endian) 753 { 754 int err; 755 unsigned long vals[2]; 756 757 if (!address_ok(regs, ea, 16)) 758 return -EFAULT; 759 vals[0] = regs->gpr[reg]; 760 vals[1] = regs->gpr[reg + 1]; 761 if (unlikely(cross_endian)) 762 do_byte_reverse(vals, 16); 763 764 /* if aligned, should be atomic */ 765 if ((ea & 0xf) == 0) 766 return do_stq(ea, vals[0], vals[1]); 767 768 err = write_mem(vals[IS_LE], ea, 8, regs); 769 if (!err) 770 err = write_mem(vals[IS_BE], ea + 8, 8, regs); 771 return err; 772 } 773 #endif /* __powerpc64 */ 774 775 #ifdef CONFIG_VSX 776 void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, 777 const void *mem, bool rev) 778 { 779 int size, read_size; 780 int i, j; 781 const unsigned int *wp; 782 const unsigned short *hp; 783 const unsigned char *bp; 784 785 size = GETSIZE(op->type); 786 reg->d[0] = reg->d[1] = 0; 787 788 switch (op->element_size) { 789 case 32: 790 /* [p]lxvp[x] */ 791 case 16: 792 /* whole vector; lxv[x] or lxvl[l] */ 793 if (size == 0) 794 break; 795 memcpy(reg, mem, size); 796 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 797 rev = !rev; 798 if (rev) 799 do_byte_reverse(reg, size); 800 break; 801 case 8: 802 /* scalar loads, lxvd2x, lxvdsx */ 803 read_size = (size >= 8) ? 8 : size; 804 i = IS_LE ? 8 : 8 - read_size; 805 memcpy(®->b[i], mem, read_size); 806 if (rev) 807 do_byte_reverse(®->b[i], 8); 808 if (size < 8) { 809 if (op->type & SIGNEXT) { 810 /* size == 4 is the only case here */ 811 reg->d[IS_LE] = (signed int) reg->d[IS_LE]; 812 } else if (op->vsx_flags & VSX_FPCONV) { 813 preempt_disable(); 814 conv_sp_to_dp(®->fp[1 + IS_LE], 815 ®->dp[IS_LE]); 816 preempt_enable(); 817 } 818 } else { 819 if (size == 16) { 820 unsigned long v = *(unsigned long *)(mem + 8); 821 reg->d[IS_BE] = !rev ? v : byterev_8(v); 822 } else if (op->vsx_flags & VSX_SPLAT) 823 reg->d[IS_BE] = reg->d[IS_LE]; 824 } 825 break; 826 case 4: 827 /* lxvw4x, lxvwsx */ 828 wp = mem; 829 for (j = 0; j < size / 4; ++j) { 830 i = IS_LE ? 3 - j : j; 831 reg->w[i] = !rev ? *wp++ : byterev_4(*wp++); 832 } 833 if (op->vsx_flags & VSX_SPLAT) { 834 u32 val = reg->w[IS_LE ? 3 : 0]; 835 for (; j < 4; ++j) { 836 i = IS_LE ? 3 - j : j; 837 reg->w[i] = val; 838 } 839 } 840 break; 841 case 2: 842 /* lxvh8x */ 843 hp = mem; 844 for (j = 0; j < size / 2; ++j) { 845 i = IS_LE ? 7 - j : j; 846 reg->h[i] = !rev ? *hp++ : byterev_2(*hp++); 847 } 848 break; 849 case 1: 850 /* lxvb16x */ 851 bp = mem; 852 for (j = 0; j < size; ++j) { 853 i = IS_LE ? 15 - j : j; 854 reg->b[i] = *bp++; 855 } 856 break; 857 } 858 } 859 EXPORT_SYMBOL_GPL(emulate_vsx_load); 860 NOKPROBE_SYMBOL(emulate_vsx_load); 861 862 void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, 863 void *mem, bool rev) 864 { 865 int size, write_size; 866 int i, j; 867 union vsx_reg buf; 868 unsigned int *wp; 869 unsigned short *hp; 870 unsigned char *bp; 871 872 size = GETSIZE(op->type); 873 874 switch (op->element_size) { 875 case 32: 876 /* [p]stxvp[x] */ 877 if (size == 0) 878 break; 879 if (rev) { 880 /* reverse 32 bytes */ 881 union vsx_reg buf32[2]; 882 buf32[0].d[0] = byterev_8(reg[1].d[1]); 883 buf32[0].d[1] = byterev_8(reg[1].d[0]); 884 buf32[1].d[0] = byterev_8(reg[0].d[1]); 885 buf32[1].d[1] = byterev_8(reg[0].d[0]); 886 memcpy(mem, buf32, size); 887 } else { 888 memcpy(mem, reg, size); 889 } 890 break; 891 case 16: 892 /* stxv, stxvx, stxvl, stxvll */ 893 if (size == 0) 894 break; 895 if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) 896 rev = !rev; 897 if (rev) { 898 /* reverse 16 bytes */ 899 buf.d[0] = byterev_8(reg->d[1]); 900 buf.d[1] = byterev_8(reg->d[0]); 901 reg = &buf; 902 } 903 memcpy(mem, reg, size); 904 break; 905 case 8: 906 /* scalar stores, stxvd2x */ 907 write_size = (size >= 8) ? 8 : size; 908 i = IS_LE ? 8 : 8 - write_size; 909 if (size < 8 && op->vsx_flags & VSX_FPCONV) { 910 buf.d[0] = buf.d[1] = 0; 911 preempt_disable(); 912 conv_dp_to_sp(®->dp[IS_LE], &buf.fp[1 + IS_LE]); 913 preempt_enable(); 914 reg = &buf; 915 } 916 memcpy(mem, ®->b[i], write_size); 917 if (size == 16) 918 memcpy(mem + 8, ®->d[IS_BE], 8); 919 if (unlikely(rev)) { 920 do_byte_reverse(mem, write_size); 921 if (size == 16) 922 do_byte_reverse(mem + 8, 8); 923 } 924 break; 925 case 4: 926 /* stxvw4x */ 927 wp = mem; 928 for (j = 0; j < size / 4; ++j) { 929 i = IS_LE ? 3 - j : j; 930 *wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]); 931 } 932 break; 933 case 2: 934 /* stxvh8x */ 935 hp = mem; 936 for (j = 0; j < size / 2; ++j) { 937 i = IS_LE ? 7 - j : j; 938 *hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]); 939 } 940 break; 941 case 1: 942 /* stvxb16x */ 943 bp = mem; 944 for (j = 0; j < size; ++j) { 945 i = IS_LE ? 15 - j : j; 946 *bp++ = reg->b[i]; 947 } 948 break; 949 } 950 } 951 EXPORT_SYMBOL_GPL(emulate_vsx_store); 952 NOKPROBE_SYMBOL(emulate_vsx_store); 953 954 static nokprobe_inline int do_vsx_load(struct instruction_op *op, 955 unsigned long ea, struct pt_regs *regs, 956 bool cross_endian) 957 { 958 int reg = op->reg; 959 int i, j, nr_vsx_regs; 960 u8 mem[32]; 961 union vsx_reg buf[2]; 962 int size = GETSIZE(op->type); 963 964 if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) 965 return -EFAULT; 966 967 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 968 emulate_vsx_load(op, buf, mem, cross_endian); 969 preempt_disable(); 970 if (reg < 32) { 971 /* FP regs + extensions */ 972 if (regs->msr & MSR_FP) { 973 for (i = 0; i < nr_vsx_regs; i++) { 974 j = IS_LE ? nr_vsx_regs - i - 1 : i; 975 load_vsrn(reg + i, &buf[j].v); 976 } 977 } else { 978 for (i = 0; i < nr_vsx_regs; i++) { 979 j = IS_LE ? nr_vsx_regs - i - 1 : i; 980 current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0]; 981 current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1]; 982 } 983 } 984 } else { 985 if (regs->msr & MSR_VEC) { 986 for (i = 0; i < nr_vsx_regs; i++) { 987 j = IS_LE ? nr_vsx_regs - i - 1 : i; 988 load_vsrn(reg + i, &buf[j].v); 989 } 990 } else { 991 for (i = 0; i < nr_vsx_regs; i++) { 992 j = IS_LE ? nr_vsx_regs - i - 1 : i; 993 current->thread.vr_state.vr[reg - 32 + i] = buf[j].v; 994 } 995 } 996 } 997 preempt_enable(); 998 return 0; 999 } 1000 1001 static nokprobe_inline int do_vsx_store(struct instruction_op *op, 1002 unsigned long ea, struct pt_regs *regs, 1003 bool cross_endian) 1004 { 1005 int reg = op->reg; 1006 int i, j, nr_vsx_regs; 1007 u8 mem[32]; 1008 union vsx_reg buf[2]; 1009 int size = GETSIZE(op->type); 1010 1011 if (!address_ok(regs, ea, size)) 1012 return -EFAULT; 1013 1014 nr_vsx_regs = max(1ul, size / sizeof(__vector128)); 1015 preempt_disable(); 1016 if (reg < 32) { 1017 /* FP regs + extensions */ 1018 if (regs->msr & MSR_FP) { 1019 for (i = 0; i < nr_vsx_regs; i++) { 1020 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1021 store_vsrn(reg + i, &buf[j].v); 1022 } 1023 } else { 1024 for (i = 0; i < nr_vsx_regs; i++) { 1025 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1026 buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0]; 1027 buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1]; 1028 } 1029 } 1030 } else { 1031 if (regs->msr & MSR_VEC) { 1032 for (i = 0; i < nr_vsx_regs; i++) { 1033 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1034 store_vsrn(reg + i, &buf[j].v); 1035 } 1036 } else { 1037 for (i = 0; i < nr_vsx_regs; i++) { 1038 j = IS_LE ? nr_vsx_regs - i - 1 : i; 1039 buf[j].v = current->thread.vr_state.vr[reg - 32 + i]; 1040 } 1041 } 1042 } 1043 preempt_enable(); 1044 emulate_vsx_store(op, buf, mem, cross_endian); 1045 return copy_mem_out(mem, ea, size, regs); 1046 } 1047 #endif /* CONFIG_VSX */ 1048 1049 static int __emulate_dcbz(unsigned long ea) 1050 { 1051 unsigned long i; 1052 unsigned long size = l1_dcache_bytes(); 1053 1054 for (i = 0; i < size; i += sizeof(long)) 1055 unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault); 1056 1057 return 0; 1058 1059 Efault: 1060 return -EFAULT; 1061 } 1062 1063 int emulate_dcbz(unsigned long ea, struct pt_regs *regs) 1064 { 1065 int err; 1066 unsigned long size = l1_dcache_bytes(); 1067 1068 ea = truncate_if_32bit(regs->msr, ea); 1069 ea &= ~(size - 1); 1070 if (!address_ok(regs, ea, size)) 1071 return -EFAULT; 1072 1073 if (is_kernel_addr(ea)) { 1074 err = __emulate_dcbz(ea); 1075 } else if (user_write_access_begin((void __user *)ea, size)) { 1076 err = __emulate_dcbz(ea); 1077 user_write_access_end(); 1078 } else { 1079 err = -EFAULT; 1080 } 1081 1082 if (err) 1083 regs->dar = ea; 1084 1085 1086 return err; 1087 } 1088 NOKPROBE_SYMBOL(emulate_dcbz); 1089 1090 #define __put_user_asmx(x, addr, err, op, cr) \ 1091 __asm__ __volatile__( \ 1092 ".machine push\n" \ 1093 ".machine power8\n" \ 1094 "1: " op " %2,0,%3\n" \ 1095 ".machine pop\n" \ 1096 " mfcr %1\n" \ 1097 "2:\n" \ 1098 ".section .fixup,\"ax\"\n" \ 1099 "3: li %0,%4\n" \ 1100 " b 2b\n" \ 1101 ".previous\n" \ 1102 EX_TABLE(1b, 3b) \ 1103 : "=r" (err), "=r" (cr) \ 1104 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)) 1105 1106 #define __get_user_asmx(x, addr, err, op) \ 1107 __asm__ __volatile__( \ 1108 ".machine push\n" \ 1109 ".machine power8\n" \ 1110 "1: "op" %1,0,%2\n" \ 1111 ".machine pop\n" \ 1112 "2:\n" \ 1113 ".section .fixup,\"ax\"\n" \ 1114 "3: li %0,%3\n" \ 1115 " b 2b\n" \ 1116 ".previous\n" \ 1117 EX_TABLE(1b, 3b) \ 1118 : "=r" (err), "=r" (x) \ 1119 : "r" (addr), "i" (-EFAULT), "0" (err)) 1120 1121 #define __cacheop_user_asmx(addr, err, op) \ 1122 __asm__ __volatile__( \ 1123 "1: "op" 0,%1\n" \ 1124 "2:\n" \ 1125 ".section .fixup,\"ax\"\n" \ 1126 "3: li %0,%3\n" \ 1127 " b 2b\n" \ 1128 ".previous\n" \ 1129 EX_TABLE(1b, 3b) \ 1130 : "=r" (err) \ 1131 : "r" (addr), "i" (-EFAULT), "0" (err)) 1132 1133 static nokprobe_inline void set_cr0(const struct pt_regs *regs, 1134 struct instruction_op *op) 1135 { 1136 long val = op->val; 1137 1138 op->type |= SETCC; 1139 op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); 1140 if (!(regs->msr & MSR_64BIT)) 1141 val = (int) val; 1142 if (val < 0) 1143 op->ccval |= 0x80000000; 1144 else if (val > 0) 1145 op->ccval |= 0x40000000; 1146 else 1147 op->ccval |= 0x20000000; 1148 } 1149 1150 static nokprobe_inline void set_ca32(struct instruction_op *op, bool val) 1151 { 1152 if (cpu_has_feature(CPU_FTR_ARCH_300)) { 1153 if (val) 1154 op->xerval |= XER_CA32; 1155 else 1156 op->xerval &= ~XER_CA32; 1157 } 1158 } 1159 1160 static nokprobe_inline void add_with_carry(const struct pt_regs *regs, 1161 struct instruction_op *op, int rd, 1162 unsigned long val1, unsigned long val2, 1163 unsigned long carry_in) 1164 { 1165 unsigned long val = val1 + val2; 1166 1167 if (carry_in) 1168 ++val; 1169 op->type = COMPUTE + SETREG + SETXER; 1170 op->reg = rd; 1171 op->val = val; 1172 val = truncate_if_32bit(regs->msr, val); 1173 val1 = truncate_if_32bit(regs->msr, val1); 1174 op->xerval = regs->xer; 1175 if (val < val1 || (carry_in && val == val1)) 1176 op->xerval |= XER_CA; 1177 else 1178 op->xerval &= ~XER_CA; 1179 1180 set_ca32(op, (unsigned int)val < (unsigned int)val1 || 1181 (carry_in && (unsigned int)val == (unsigned int)val1)); 1182 } 1183 1184 static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, 1185 struct instruction_op *op, 1186 long v1, long v2, int crfld) 1187 { 1188 unsigned int crval, shift; 1189 1190 op->type = COMPUTE + SETCC; 1191 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1192 if (v1 < v2) 1193 crval |= 8; 1194 else if (v1 > v2) 1195 crval |= 4; 1196 else 1197 crval |= 2; 1198 shift = (7 - crfld) * 4; 1199 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1200 } 1201 1202 static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, 1203 struct instruction_op *op, 1204 unsigned long v1, 1205 unsigned long v2, int crfld) 1206 { 1207 unsigned int crval, shift; 1208 1209 op->type = COMPUTE + SETCC; 1210 crval = (regs->xer >> 31) & 1; /* get SO bit */ 1211 if (v1 < v2) 1212 crval |= 8; 1213 else if (v1 > v2) 1214 crval |= 4; 1215 else 1216 crval |= 2; 1217 shift = (7 - crfld) * 4; 1218 op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift); 1219 } 1220 1221 static nokprobe_inline void do_cmpb(const struct pt_regs *regs, 1222 struct instruction_op *op, 1223 unsigned long v1, unsigned long v2) 1224 { 1225 unsigned long long out_val, mask; 1226 int i; 1227 1228 out_val = 0; 1229 for (i = 0; i < 8; i++) { 1230 mask = 0xffUL << (i * 8); 1231 if ((v1 & mask) == (v2 & mask)) 1232 out_val |= mask; 1233 } 1234 op->val = out_val; 1235 } 1236 1237 /* 1238 * The size parameter is used to adjust the equivalent popcnt instruction. 1239 * popcntb = 8, popcntw = 32, popcntd = 64 1240 */ 1241 static nokprobe_inline void do_popcnt(const struct pt_regs *regs, 1242 struct instruction_op *op, 1243 unsigned long v1, int size) 1244 { 1245 unsigned long long out = v1; 1246 1247 out -= (out >> 1) & 0x5555555555555555ULL; 1248 out = (0x3333333333333333ULL & out) + 1249 (0x3333333333333333ULL & (out >> 2)); 1250 out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL; 1251 1252 if (size == 8) { /* popcntb */ 1253 op->val = out; 1254 return; 1255 } 1256 out += out >> 8; 1257 out += out >> 16; 1258 if (size == 32) { /* popcntw */ 1259 op->val = out & 0x0000003f0000003fULL; 1260 return; 1261 } 1262 1263 out = (out + (out >> 32)) & 0x7f; 1264 op->val = out; /* popcntd */ 1265 } 1266 1267 #ifdef CONFIG_PPC64 1268 static nokprobe_inline void do_bpermd(const struct pt_regs *regs, 1269 struct instruction_op *op, 1270 unsigned long v1, unsigned long v2) 1271 { 1272 unsigned char perm, idx; 1273 unsigned int i; 1274 1275 perm = 0; 1276 for (i = 0; i < 8; i++) { 1277 idx = (v1 >> (i * 8)) & 0xff; 1278 if (idx < 64) 1279 if (v2 & PPC_BIT(idx)) 1280 perm |= 1 << i; 1281 } 1282 op->val = perm; 1283 } 1284 #endif /* CONFIG_PPC64 */ 1285 /* 1286 * The size parameter adjusts the equivalent prty instruction. 1287 * prtyw = 32, prtyd = 64 1288 */ 1289 static nokprobe_inline void do_prty(const struct pt_regs *regs, 1290 struct instruction_op *op, 1291 unsigned long v, int size) 1292 { 1293 unsigned long long res = v ^ (v >> 8); 1294 1295 res ^= res >> 16; 1296 if (size == 32) { /* prtyw */ 1297 op->val = res & 0x0000000100000001ULL; 1298 return; 1299 } 1300 1301 res ^= res >> 32; 1302 op->val = res & 1; /*prtyd */ 1303 } 1304 1305 static nokprobe_inline int trap_compare(long v1, long v2) 1306 { 1307 int ret = 0; 1308 1309 if (v1 < v2) 1310 ret |= 0x10; 1311 else if (v1 > v2) 1312 ret |= 0x08; 1313 else 1314 ret |= 0x04; 1315 if ((unsigned long)v1 < (unsigned long)v2) 1316 ret |= 0x02; 1317 else if ((unsigned long)v1 > (unsigned long)v2) 1318 ret |= 0x01; 1319 return ret; 1320 } 1321 1322 /* 1323 * Elements of 32-bit rotate and mask instructions. 1324 */ 1325 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \ 1326 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb))) 1327 #ifdef __powerpc64__ 1328 #define MASK64_L(mb) (~0UL >> (mb)) 1329 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me)) 1330 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb))) 1331 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32)) 1332 #else 1333 #define DATA32(x) (x) 1334 #endif 1335 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x)) 1336 1337 /* 1338 * Decode an instruction, and return information about it in *op 1339 * without changing *regs. 1340 * Integer arithmetic and logical instructions, branches, and barrier 1341 * instructions can be emulated just using the information in *op. 1342 * 1343 * Return value is 1 if the instruction can be emulated just by 1344 * updating *regs with the information in *op, -1 if we need the 1345 * GPRs but *regs doesn't contain the full register set, or 0 1346 * otherwise. 1347 */ 1348 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, 1349 ppc_inst_t instr) 1350 { 1351 #ifdef CONFIG_PPC64 1352 unsigned int suffixopcode, prefixtype, prefix_r; 1353 #endif 1354 unsigned int opcode, ra, rb, rc, rd, spr, u; 1355 unsigned long int imm; 1356 unsigned long int val, val2; 1357 unsigned int mb, me, sh; 1358 unsigned int word, suffix; 1359 long ival; 1360 1361 word = ppc_inst_val(instr); 1362 suffix = ppc_inst_suffix(instr); 1363 1364 op->type = COMPUTE; 1365 1366 opcode = ppc_inst_primary_opcode(instr); 1367 switch (opcode) { 1368 case 16: /* bc */ 1369 op->type = BRANCH; 1370 imm = (signed short)(word & 0xfffc); 1371 if ((word & 2) == 0) 1372 imm += regs->nip; 1373 op->val = truncate_if_32bit(regs->msr, imm); 1374 if (word & 1) 1375 op->type |= SETLK; 1376 if (branch_taken(word, regs, op)) 1377 op->type |= BRTAKEN; 1378 return 1; 1379 #ifdef CONFIG_PPC64 1380 case 17: /* sc */ 1381 if ((word & 0xfe2) == 2) 1382 op->type = SYSCALL; 1383 else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && 1384 (word & 0xfe3) == 1) { /* scv */ 1385 op->type = SYSCALL_VECTORED_0; 1386 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1387 goto unknown_opcode; 1388 } else 1389 op->type = UNKNOWN; 1390 return 0; 1391 #endif 1392 case 18: /* b */ 1393 op->type = BRANCH | BRTAKEN; 1394 imm = word & 0x03fffffc; 1395 if (imm & 0x02000000) 1396 imm -= 0x04000000; 1397 if ((word & 2) == 0) 1398 imm += regs->nip; 1399 op->val = truncate_if_32bit(regs->msr, imm); 1400 if (word & 1) 1401 op->type |= SETLK; 1402 return 1; 1403 case 19: 1404 switch ((word >> 1) & 0x3ff) { 1405 case 0: /* mcrf */ 1406 op->type = COMPUTE + SETCC; 1407 rd = 7 - ((word >> 23) & 0x7); 1408 ra = 7 - ((word >> 18) & 0x7); 1409 rd *= 4; 1410 ra *= 4; 1411 val = (regs->ccr >> ra) & 0xf; 1412 op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd); 1413 return 1; 1414 1415 case 16: /* bclr */ 1416 case 528: /* bcctr */ 1417 op->type = BRANCH; 1418 imm = (word & 0x400)? regs->ctr: regs->link; 1419 op->val = truncate_if_32bit(regs->msr, imm); 1420 if (word & 1) 1421 op->type |= SETLK; 1422 if (branch_taken(word, regs, op)) 1423 op->type |= BRTAKEN; 1424 return 1; 1425 1426 case 18: /* rfid, scary */ 1427 if (regs->msr & MSR_PR) 1428 goto priv; 1429 op->type = RFI; 1430 return 0; 1431 1432 case 150: /* isync */ 1433 op->type = BARRIER | BARRIER_ISYNC; 1434 return 1; 1435 1436 case 33: /* crnor */ 1437 case 129: /* crandc */ 1438 case 193: /* crxor */ 1439 case 225: /* crnand */ 1440 case 257: /* crand */ 1441 case 289: /* creqv */ 1442 case 417: /* crorc */ 1443 case 449: /* cror */ 1444 op->type = COMPUTE + SETCC; 1445 ra = (word >> 16) & 0x1f; 1446 rb = (word >> 11) & 0x1f; 1447 rd = (word >> 21) & 0x1f; 1448 ra = (regs->ccr >> (31 - ra)) & 1; 1449 rb = (regs->ccr >> (31 - rb)) & 1; 1450 val = (word >> (6 + ra * 2 + rb)) & 1; 1451 op->ccval = (regs->ccr & ~(1UL << (31 - rd))) | 1452 (val << (31 - rd)); 1453 return 1; 1454 } 1455 break; 1456 case 31: 1457 switch ((word >> 1) & 0x3ff) { 1458 case 598: /* sync */ 1459 op->type = BARRIER + BARRIER_SYNC; 1460 #ifdef __powerpc64__ 1461 switch ((word >> 21) & 3) { 1462 case 1: /* lwsync */ 1463 op->type = BARRIER + BARRIER_LWSYNC; 1464 break; 1465 case 2: /* ptesync */ 1466 op->type = BARRIER + BARRIER_PTESYNC; 1467 break; 1468 } 1469 #endif 1470 return 1; 1471 1472 case 854: /* eieio */ 1473 op->type = BARRIER + BARRIER_EIEIO; 1474 return 1; 1475 } 1476 break; 1477 } 1478 1479 rd = (word >> 21) & 0x1f; 1480 ra = (word >> 16) & 0x1f; 1481 rb = (word >> 11) & 0x1f; 1482 rc = (word >> 6) & 0x1f; 1483 1484 switch (opcode) { 1485 #ifdef __powerpc64__ 1486 case 1: 1487 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 1488 goto unknown_opcode; 1489 1490 prefix_r = GET_PREFIX_R(word); 1491 ra = GET_PREFIX_RA(suffix); 1492 rd = (suffix >> 21) & 0x1f; 1493 op->reg = rd; 1494 op->val = regs->gpr[rd]; 1495 suffixopcode = get_op(suffix); 1496 prefixtype = (word >> 24) & 0x3; 1497 switch (prefixtype) { 1498 case 2: 1499 if (prefix_r && ra) 1500 return 0; 1501 switch (suffixopcode) { 1502 case 14: /* paddi */ 1503 op->type = COMPUTE | PREFIXED; 1504 op->val = mlsd_8lsd_ea(word, suffix, regs); 1505 goto compute_done; 1506 } 1507 } 1508 break; 1509 case 2: /* tdi */ 1510 if (rd & trap_compare(regs->gpr[ra], (short) word)) 1511 goto trap; 1512 return 1; 1513 #endif 1514 case 3: /* twi */ 1515 if (rd & trap_compare((int)regs->gpr[ra], (short) word)) 1516 goto trap; 1517 return 1; 1518 1519 #ifdef __powerpc64__ 1520 case 4: 1521 /* 1522 * There are very many instructions with this primary opcode 1523 * introduced in the ISA as early as v2.03. However, the ones 1524 * we currently emulate were all introduced with ISA 3.0 1525 */ 1526 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1527 goto unknown_opcode; 1528 1529 switch (word & 0x3f) { 1530 case 48: /* maddhd */ 1531 asm volatile(PPC_MADDHD(%0, %1, %2, %3) : 1532 "=r" (op->val) : "r" (regs->gpr[ra]), 1533 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1534 goto compute_done; 1535 1536 case 49: /* maddhdu */ 1537 asm volatile(PPC_MADDHDU(%0, %1, %2, %3) : 1538 "=r" (op->val) : "r" (regs->gpr[ra]), 1539 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1540 goto compute_done; 1541 1542 case 51: /* maddld */ 1543 asm volatile(PPC_MADDLD(%0, %1, %2, %3) : 1544 "=r" (op->val) : "r" (regs->gpr[ra]), 1545 "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); 1546 goto compute_done; 1547 } 1548 1549 /* 1550 * There are other instructions from ISA 3.0 with the same 1551 * primary opcode which do not have emulation support yet. 1552 */ 1553 goto unknown_opcode; 1554 #endif 1555 1556 case 7: /* mulli */ 1557 op->val = regs->gpr[ra] * (short) word; 1558 goto compute_done; 1559 1560 case 8: /* subfic */ 1561 imm = (short) word; 1562 add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1); 1563 return 1; 1564 1565 case 10: /* cmpli */ 1566 imm = (unsigned short) word; 1567 val = regs->gpr[ra]; 1568 #ifdef __powerpc64__ 1569 if ((rd & 1) == 0) 1570 val = (unsigned int) val; 1571 #endif 1572 do_cmp_unsigned(regs, op, val, imm, rd >> 2); 1573 return 1; 1574 1575 case 11: /* cmpi */ 1576 imm = (short) word; 1577 val = regs->gpr[ra]; 1578 #ifdef __powerpc64__ 1579 if ((rd & 1) == 0) 1580 val = (int) val; 1581 #endif 1582 do_cmp_signed(regs, op, val, imm, rd >> 2); 1583 return 1; 1584 1585 case 12: /* addic */ 1586 imm = (short) word; 1587 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1588 return 1; 1589 1590 case 13: /* addic. */ 1591 imm = (short) word; 1592 add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0); 1593 set_cr0(regs, op); 1594 return 1; 1595 1596 case 14: /* addi */ 1597 imm = (short) word; 1598 if (ra) 1599 imm += regs->gpr[ra]; 1600 op->val = imm; 1601 goto compute_done; 1602 1603 case 15: /* addis */ 1604 imm = ((short) word) << 16; 1605 if (ra) 1606 imm += regs->gpr[ra]; 1607 op->val = imm; 1608 goto compute_done; 1609 1610 case 19: 1611 if (((word >> 1) & 0x1f) == 2) { 1612 /* addpcis */ 1613 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1614 goto unknown_opcode; 1615 imm = (short) (word & 0xffc1); /* d0 + d2 fields */ 1616 imm |= (word >> 15) & 0x3e; /* d1 field */ 1617 op->val = regs->nip + (imm << 16) + 4; 1618 goto compute_done; 1619 } 1620 op->type = UNKNOWN; 1621 return 0; 1622 1623 case 20: /* rlwimi */ 1624 mb = (word >> 6) & 0x1f; 1625 me = (word >> 1) & 0x1f; 1626 val = DATA32(regs->gpr[rd]); 1627 imm = MASK32(mb, me); 1628 op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm); 1629 goto logical_done; 1630 1631 case 21: /* rlwinm */ 1632 mb = (word >> 6) & 0x1f; 1633 me = (word >> 1) & 0x1f; 1634 val = DATA32(regs->gpr[rd]); 1635 op->val = ROTATE(val, rb) & MASK32(mb, me); 1636 goto logical_done; 1637 1638 case 23: /* rlwnm */ 1639 mb = (word >> 6) & 0x1f; 1640 me = (word >> 1) & 0x1f; 1641 rb = regs->gpr[rb] & 0x1f; 1642 val = DATA32(regs->gpr[rd]); 1643 op->val = ROTATE(val, rb) & MASK32(mb, me); 1644 goto logical_done; 1645 1646 case 24: /* ori */ 1647 op->val = regs->gpr[rd] | (unsigned short) word; 1648 goto logical_done_nocc; 1649 1650 case 25: /* oris */ 1651 imm = (unsigned short) word; 1652 op->val = regs->gpr[rd] | (imm << 16); 1653 goto logical_done_nocc; 1654 1655 case 26: /* xori */ 1656 op->val = regs->gpr[rd] ^ (unsigned short) word; 1657 goto logical_done_nocc; 1658 1659 case 27: /* xoris */ 1660 imm = (unsigned short) word; 1661 op->val = regs->gpr[rd] ^ (imm << 16); 1662 goto logical_done_nocc; 1663 1664 case 28: /* andi. */ 1665 op->val = regs->gpr[rd] & (unsigned short) word; 1666 set_cr0(regs, op); 1667 goto logical_done_nocc; 1668 1669 case 29: /* andis. */ 1670 imm = (unsigned short) word; 1671 op->val = regs->gpr[rd] & (imm << 16); 1672 set_cr0(regs, op); 1673 goto logical_done_nocc; 1674 1675 #ifdef __powerpc64__ 1676 case 30: /* rld* */ 1677 mb = ((word >> 6) & 0x1f) | (word & 0x20); 1678 val = regs->gpr[rd]; 1679 if ((word & 0x10) == 0) { 1680 sh = rb | ((word & 2) << 4); 1681 val = ROTATE(val, sh); 1682 switch ((word >> 2) & 3) { 1683 case 0: /* rldicl */ 1684 val &= MASK64_L(mb); 1685 break; 1686 case 1: /* rldicr */ 1687 val &= MASK64_R(mb); 1688 break; 1689 case 2: /* rldic */ 1690 val &= MASK64(mb, 63 - sh); 1691 break; 1692 case 3: /* rldimi */ 1693 imm = MASK64(mb, 63 - sh); 1694 val = (regs->gpr[ra] & ~imm) | 1695 (val & imm); 1696 } 1697 op->val = val; 1698 goto logical_done; 1699 } else { 1700 sh = regs->gpr[rb] & 0x3f; 1701 val = ROTATE(val, sh); 1702 switch ((word >> 1) & 7) { 1703 case 0: /* rldcl */ 1704 op->val = val & MASK64_L(mb); 1705 goto logical_done; 1706 case 1: /* rldcr */ 1707 op->val = val & MASK64_R(mb); 1708 goto logical_done; 1709 } 1710 } 1711 #endif 1712 op->type = UNKNOWN; /* illegal instruction */ 1713 return 0; 1714 1715 case 31: 1716 /* isel occupies 32 minor opcodes */ 1717 if (((word >> 1) & 0x1f) == 15) { 1718 mb = (word >> 6) & 0x1f; /* bc field */ 1719 val = (regs->ccr >> (31 - mb)) & 1; 1720 val2 = (ra) ? regs->gpr[ra] : 0; 1721 1722 op->val = (val) ? val2 : regs->gpr[rb]; 1723 goto compute_done; 1724 } 1725 1726 switch ((word >> 1) & 0x3ff) { 1727 case 4: /* tw */ 1728 if (rd == 0x1f || 1729 (rd & trap_compare((int)regs->gpr[ra], 1730 (int)regs->gpr[rb]))) 1731 goto trap; 1732 return 1; 1733 #ifdef __powerpc64__ 1734 case 68: /* td */ 1735 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb])) 1736 goto trap; 1737 return 1; 1738 #endif 1739 case 83: /* mfmsr */ 1740 if (regs->msr & MSR_PR) 1741 goto priv; 1742 op->type = MFMSR; 1743 op->reg = rd; 1744 return 0; 1745 case 146: /* mtmsr */ 1746 if (regs->msr & MSR_PR) 1747 goto priv; 1748 op->type = MTMSR; 1749 op->reg = rd; 1750 op->val = 0xffffffff & ~(MSR_ME | MSR_LE); 1751 return 0; 1752 #ifdef CONFIG_PPC64 1753 case 178: /* mtmsrd */ 1754 if (regs->msr & MSR_PR) 1755 goto priv; 1756 op->type = MTMSR; 1757 op->reg = rd; 1758 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 1759 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */ 1760 imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL; 1761 op->val = imm; 1762 return 0; 1763 #endif 1764 1765 case 19: /* mfcr */ 1766 imm = 0xffffffffUL; 1767 if ((word >> 20) & 1) { 1768 imm = 0xf0000000UL; 1769 for (sh = 0; sh < 8; ++sh) { 1770 if (word & (0x80000 >> sh)) 1771 break; 1772 imm >>= 4; 1773 } 1774 } 1775 op->val = regs->ccr & imm; 1776 goto compute_done; 1777 1778 case 128: /* setb */ 1779 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1780 goto unknown_opcode; 1781 /* 1782 * 'ra' encodes the CR field number (bfa) in the top 3 bits. 1783 * Since each CR field is 4 bits, 1784 * we can simply mask off the bottom two bits (bfa * 4) 1785 * to yield the first bit in the CR field. 1786 */ 1787 ra = ra & ~0x3; 1788 /* 'val' stores bits of the CR field (bfa) */ 1789 val = regs->ccr >> (CR0_SHIFT - ra); 1790 /* checks if the LT bit of CR field (bfa) is set */ 1791 if (val & 8) 1792 op->val = -1; 1793 /* checks if the GT bit of CR field (bfa) is set */ 1794 else if (val & 4) 1795 op->val = 1; 1796 else 1797 op->val = 0; 1798 goto compute_done; 1799 1800 case 144: /* mtcrf */ 1801 op->type = COMPUTE + SETCC; 1802 imm = 0xf0000000UL; 1803 val = regs->gpr[rd]; 1804 op->ccval = regs->ccr; 1805 for (sh = 0; sh < 8; ++sh) { 1806 if (word & (0x80000 >> sh)) 1807 op->ccval = (op->ccval & ~imm) | 1808 (val & imm); 1809 imm >>= 4; 1810 } 1811 return 1; 1812 1813 case 339: /* mfspr */ 1814 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1815 op->type = MFSPR; 1816 op->reg = rd; 1817 op->spr = spr; 1818 if (spr == SPRN_XER || spr == SPRN_LR || 1819 spr == SPRN_CTR) 1820 return 1; 1821 return 0; 1822 1823 case 467: /* mtspr */ 1824 spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0); 1825 op->type = MTSPR; 1826 op->val = regs->gpr[rd]; 1827 op->spr = spr; 1828 if (spr == SPRN_XER || spr == SPRN_LR || 1829 spr == SPRN_CTR) 1830 return 1; 1831 return 0; 1832 1833 /* 1834 * Compare instructions 1835 */ 1836 case 0: /* cmp */ 1837 val = regs->gpr[ra]; 1838 val2 = regs->gpr[rb]; 1839 #ifdef __powerpc64__ 1840 if ((rd & 1) == 0) { 1841 /* word (32-bit) compare */ 1842 val = (int) val; 1843 val2 = (int) val2; 1844 } 1845 #endif 1846 do_cmp_signed(regs, op, val, val2, rd >> 2); 1847 return 1; 1848 1849 case 32: /* cmpl */ 1850 val = regs->gpr[ra]; 1851 val2 = regs->gpr[rb]; 1852 #ifdef __powerpc64__ 1853 if ((rd & 1) == 0) { 1854 /* word (32-bit) compare */ 1855 val = (unsigned int) val; 1856 val2 = (unsigned int) val2; 1857 } 1858 #endif 1859 do_cmp_unsigned(regs, op, val, val2, rd >> 2); 1860 return 1; 1861 1862 case 508: /* cmpb */ 1863 do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]); 1864 goto logical_done_nocc; 1865 1866 /* 1867 * Arithmetic instructions 1868 */ 1869 case 8: /* subfc */ 1870 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1871 regs->gpr[rb], 1); 1872 goto arith_done; 1873 #ifdef __powerpc64__ 1874 case 9: /* mulhdu */ 1875 asm("mulhdu %0,%1,%2" : "=r" (op->val) : 1876 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1877 goto arith_done; 1878 #endif 1879 case 10: /* addc */ 1880 add_with_carry(regs, op, rd, regs->gpr[ra], 1881 regs->gpr[rb], 0); 1882 goto arith_done; 1883 1884 case 11: /* mulhwu */ 1885 asm("mulhwu %0,%1,%2" : "=r" (op->val) : 1886 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1887 goto arith_done; 1888 1889 case 40: /* subf */ 1890 op->val = regs->gpr[rb] - regs->gpr[ra]; 1891 goto arith_done; 1892 #ifdef __powerpc64__ 1893 case 73: /* mulhd */ 1894 asm("mulhd %0,%1,%2" : "=r" (op->val) : 1895 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1896 goto arith_done; 1897 #endif 1898 case 75: /* mulhw */ 1899 asm("mulhw %0,%1,%2" : "=r" (op->val) : 1900 "r" (regs->gpr[ra]), "r" (regs->gpr[rb])); 1901 goto arith_done; 1902 1903 case 104: /* neg */ 1904 op->val = -regs->gpr[ra]; 1905 goto arith_done; 1906 1907 case 136: /* subfe */ 1908 add_with_carry(regs, op, rd, ~regs->gpr[ra], 1909 regs->gpr[rb], regs->xer & XER_CA); 1910 goto arith_done; 1911 1912 case 138: /* adde */ 1913 add_with_carry(regs, op, rd, regs->gpr[ra], 1914 regs->gpr[rb], regs->xer & XER_CA); 1915 goto arith_done; 1916 1917 case 200: /* subfze */ 1918 add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L, 1919 regs->xer & XER_CA); 1920 goto arith_done; 1921 1922 case 202: /* addze */ 1923 add_with_carry(regs, op, rd, regs->gpr[ra], 0L, 1924 regs->xer & XER_CA); 1925 goto arith_done; 1926 1927 case 232: /* subfme */ 1928 add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L, 1929 regs->xer & XER_CA); 1930 goto arith_done; 1931 #ifdef __powerpc64__ 1932 case 233: /* mulld */ 1933 op->val = regs->gpr[ra] * regs->gpr[rb]; 1934 goto arith_done; 1935 #endif 1936 case 234: /* addme */ 1937 add_with_carry(regs, op, rd, regs->gpr[ra], -1L, 1938 regs->xer & XER_CA); 1939 goto arith_done; 1940 1941 case 235: /* mullw */ 1942 op->val = (long)(int) regs->gpr[ra] * 1943 (int) regs->gpr[rb]; 1944 1945 goto arith_done; 1946 #ifdef __powerpc64__ 1947 case 265: /* modud */ 1948 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1949 goto unknown_opcode; 1950 op->val = regs->gpr[ra] % regs->gpr[rb]; 1951 goto compute_done; 1952 #endif 1953 case 266: /* add */ 1954 op->val = regs->gpr[ra] + regs->gpr[rb]; 1955 goto arith_done; 1956 1957 case 267: /* moduw */ 1958 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1959 goto unknown_opcode; 1960 op->val = (unsigned int) regs->gpr[ra] % 1961 (unsigned int) regs->gpr[rb]; 1962 goto compute_done; 1963 #ifdef __powerpc64__ 1964 case 457: /* divdu */ 1965 op->val = regs->gpr[ra] / regs->gpr[rb]; 1966 goto arith_done; 1967 #endif 1968 case 459: /* divwu */ 1969 op->val = (unsigned int) regs->gpr[ra] / 1970 (unsigned int) regs->gpr[rb]; 1971 goto arith_done; 1972 #ifdef __powerpc64__ 1973 case 489: /* divd */ 1974 op->val = (long int) regs->gpr[ra] / 1975 (long int) regs->gpr[rb]; 1976 goto arith_done; 1977 #endif 1978 case 491: /* divw */ 1979 op->val = (int) regs->gpr[ra] / 1980 (int) regs->gpr[rb]; 1981 goto arith_done; 1982 #ifdef __powerpc64__ 1983 case 425: /* divde[.] */ 1984 asm volatile(PPC_DIVDE(%0, %1, %2) : 1985 "=r" (op->val) : "r" (regs->gpr[ra]), 1986 "r" (regs->gpr[rb])); 1987 goto arith_done; 1988 case 393: /* divdeu[.] */ 1989 asm volatile(PPC_DIVDEU(%0, %1, %2) : 1990 "=r" (op->val) : "r" (regs->gpr[ra]), 1991 "r" (regs->gpr[rb])); 1992 goto arith_done; 1993 #endif 1994 case 755: /* darn */ 1995 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1996 goto unknown_opcode; 1997 switch (ra & 0x3) { 1998 case 0: 1999 /* 32-bit conditioned */ 2000 asm volatile(PPC_DARN(%0, 0) : "=r" (op->val)); 2001 goto compute_done; 2002 2003 case 1: 2004 /* 64-bit conditioned */ 2005 asm volatile(PPC_DARN(%0, 1) : "=r" (op->val)); 2006 goto compute_done; 2007 2008 case 2: 2009 /* 64-bit raw */ 2010 asm volatile(PPC_DARN(%0, 2) : "=r" (op->val)); 2011 goto compute_done; 2012 } 2013 2014 goto unknown_opcode; 2015 #ifdef __powerpc64__ 2016 case 777: /* modsd */ 2017 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2018 goto unknown_opcode; 2019 op->val = (long int) regs->gpr[ra] % 2020 (long int) regs->gpr[rb]; 2021 goto compute_done; 2022 #endif 2023 case 779: /* modsw */ 2024 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2025 goto unknown_opcode; 2026 op->val = (int) regs->gpr[ra] % 2027 (int) regs->gpr[rb]; 2028 goto compute_done; 2029 2030 2031 /* 2032 * Logical instructions 2033 */ 2034 case 26: /* cntlzw */ 2035 val = (unsigned int) regs->gpr[rd]; 2036 op->val = ( val ? __builtin_clz(val) : 32 ); 2037 goto logical_done; 2038 #ifdef __powerpc64__ 2039 case 58: /* cntlzd */ 2040 val = regs->gpr[rd]; 2041 op->val = ( val ? __builtin_clzl(val) : 64 ); 2042 goto logical_done; 2043 #endif 2044 case 28: /* and */ 2045 op->val = regs->gpr[rd] & regs->gpr[rb]; 2046 goto logical_done; 2047 2048 case 60: /* andc */ 2049 op->val = regs->gpr[rd] & ~regs->gpr[rb]; 2050 goto logical_done; 2051 2052 case 122: /* popcntb */ 2053 do_popcnt(regs, op, regs->gpr[rd], 8); 2054 goto logical_done_nocc; 2055 2056 case 124: /* nor */ 2057 op->val = ~(regs->gpr[rd] | regs->gpr[rb]); 2058 goto logical_done; 2059 2060 case 154: /* prtyw */ 2061 do_prty(regs, op, regs->gpr[rd], 32); 2062 goto logical_done_nocc; 2063 2064 case 186: /* prtyd */ 2065 do_prty(regs, op, regs->gpr[rd], 64); 2066 goto logical_done_nocc; 2067 #ifdef CONFIG_PPC64 2068 case 252: /* bpermd */ 2069 do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]); 2070 goto logical_done_nocc; 2071 #endif 2072 case 284: /* xor */ 2073 op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]); 2074 goto logical_done; 2075 2076 case 316: /* xor */ 2077 op->val = regs->gpr[rd] ^ regs->gpr[rb]; 2078 goto logical_done; 2079 2080 case 378: /* popcntw */ 2081 do_popcnt(regs, op, regs->gpr[rd], 32); 2082 goto logical_done_nocc; 2083 2084 case 412: /* orc */ 2085 op->val = regs->gpr[rd] | ~regs->gpr[rb]; 2086 goto logical_done; 2087 2088 case 444: /* or */ 2089 op->val = regs->gpr[rd] | regs->gpr[rb]; 2090 goto logical_done; 2091 2092 case 476: /* nand */ 2093 op->val = ~(regs->gpr[rd] & regs->gpr[rb]); 2094 goto logical_done; 2095 #ifdef CONFIG_PPC64 2096 case 506: /* popcntd */ 2097 do_popcnt(regs, op, regs->gpr[rd], 64); 2098 goto logical_done_nocc; 2099 #endif 2100 case 538: /* cnttzw */ 2101 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2102 goto unknown_opcode; 2103 val = (unsigned int) regs->gpr[rd]; 2104 op->val = (val ? __builtin_ctz(val) : 32); 2105 goto logical_done; 2106 #ifdef __powerpc64__ 2107 case 570: /* cnttzd */ 2108 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2109 goto unknown_opcode; 2110 val = regs->gpr[rd]; 2111 op->val = (val ? __builtin_ctzl(val) : 64); 2112 goto logical_done; 2113 #endif 2114 case 922: /* extsh */ 2115 op->val = (signed short) regs->gpr[rd]; 2116 goto logical_done; 2117 2118 case 954: /* extsb */ 2119 op->val = (signed char) regs->gpr[rd]; 2120 goto logical_done; 2121 #ifdef __powerpc64__ 2122 case 986: /* extsw */ 2123 op->val = (signed int) regs->gpr[rd]; 2124 goto logical_done; 2125 #endif 2126 2127 /* 2128 * Shift instructions 2129 */ 2130 case 24: /* slw */ 2131 sh = regs->gpr[rb] & 0x3f; 2132 if (sh < 32) 2133 op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; 2134 else 2135 op->val = 0; 2136 goto logical_done; 2137 2138 case 536: /* srw */ 2139 sh = regs->gpr[rb] & 0x3f; 2140 if (sh < 32) 2141 op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; 2142 else 2143 op->val = 0; 2144 goto logical_done; 2145 2146 case 792: /* sraw */ 2147 op->type = COMPUTE + SETREG + SETXER; 2148 sh = regs->gpr[rb] & 0x3f; 2149 ival = (signed int) regs->gpr[rd]; 2150 op->val = ival >> (sh < 32 ? sh : 31); 2151 op->xerval = regs->xer; 2152 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) 2153 op->xerval |= XER_CA; 2154 else 2155 op->xerval &= ~XER_CA; 2156 set_ca32(op, op->xerval & XER_CA); 2157 goto logical_done; 2158 2159 case 824: /* srawi */ 2160 op->type = COMPUTE + SETREG + SETXER; 2161 sh = rb; 2162 ival = (signed int) regs->gpr[rd]; 2163 op->val = ival >> sh; 2164 op->xerval = regs->xer; 2165 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2166 op->xerval |= XER_CA; 2167 else 2168 op->xerval &= ~XER_CA; 2169 set_ca32(op, op->xerval & XER_CA); 2170 goto logical_done; 2171 2172 #ifdef __powerpc64__ 2173 case 27: /* sld */ 2174 sh = regs->gpr[rb] & 0x7f; 2175 if (sh < 64) 2176 op->val = regs->gpr[rd] << sh; 2177 else 2178 op->val = 0; 2179 goto logical_done; 2180 2181 case 539: /* srd */ 2182 sh = regs->gpr[rb] & 0x7f; 2183 if (sh < 64) 2184 op->val = regs->gpr[rd] >> sh; 2185 else 2186 op->val = 0; 2187 goto logical_done; 2188 2189 case 794: /* srad */ 2190 op->type = COMPUTE + SETREG + SETXER; 2191 sh = regs->gpr[rb] & 0x7f; 2192 ival = (signed long int) regs->gpr[rd]; 2193 op->val = ival >> (sh < 64 ? sh : 63); 2194 op->xerval = regs->xer; 2195 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) 2196 op->xerval |= XER_CA; 2197 else 2198 op->xerval &= ~XER_CA; 2199 set_ca32(op, op->xerval & XER_CA); 2200 goto logical_done; 2201 2202 case 826: /* sradi with sh_5 = 0 */ 2203 case 827: /* sradi with sh_5 = 1 */ 2204 op->type = COMPUTE + SETREG + SETXER; 2205 sh = rb | ((word & 2) << 4); 2206 ival = (signed long int) regs->gpr[rd]; 2207 op->val = ival >> sh; 2208 op->xerval = regs->xer; 2209 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) 2210 op->xerval |= XER_CA; 2211 else 2212 op->xerval &= ~XER_CA; 2213 set_ca32(op, op->xerval & XER_CA); 2214 goto logical_done; 2215 2216 case 890: /* extswsli with sh_5 = 0 */ 2217 case 891: /* extswsli with sh_5 = 1 */ 2218 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2219 goto unknown_opcode; 2220 op->type = COMPUTE + SETREG; 2221 sh = rb | ((word & 2) << 4); 2222 val = (signed int) regs->gpr[rd]; 2223 if (sh) 2224 op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); 2225 else 2226 op->val = val; 2227 goto logical_done; 2228 2229 #endif /* __powerpc64__ */ 2230 2231 /* 2232 * Cache instructions 2233 */ 2234 case 54: /* dcbst */ 2235 op->type = MKOP(CACHEOP, DCBST, 0); 2236 op->ea = xform_ea(word, regs); 2237 return 0; 2238 2239 case 86: /* dcbf */ 2240 op->type = MKOP(CACHEOP, DCBF, 0); 2241 op->ea = xform_ea(word, regs); 2242 return 0; 2243 2244 case 246: /* dcbtst */ 2245 op->type = MKOP(CACHEOP, DCBTST, 0); 2246 op->ea = xform_ea(word, regs); 2247 op->reg = rd; 2248 return 0; 2249 2250 case 278: /* dcbt */ 2251 op->type = MKOP(CACHEOP, DCBTST, 0); 2252 op->ea = xform_ea(word, regs); 2253 op->reg = rd; 2254 return 0; 2255 2256 case 982: /* icbi */ 2257 op->type = MKOP(CACHEOP, ICBI, 0); 2258 op->ea = xform_ea(word, regs); 2259 return 0; 2260 2261 case 1014: /* dcbz */ 2262 op->type = MKOP(CACHEOP, DCBZ, 0); 2263 op->ea = xform_ea(word, regs); 2264 return 0; 2265 } 2266 break; 2267 } 2268 2269 /* 2270 * Loads and stores. 2271 */ 2272 op->type = UNKNOWN; 2273 op->update_reg = ra; 2274 op->reg = rd; 2275 op->val = regs->gpr[rd]; 2276 u = (word >> 20) & UPDATE; 2277 op->vsx_flags = 0; 2278 2279 switch (opcode) { 2280 case 31: 2281 u = word & UPDATE; 2282 op->ea = xform_ea(word, regs); 2283 switch ((word >> 1) & 0x3ff) { 2284 case 20: /* lwarx */ 2285 op->type = MKOP(LARX, 0, 4); 2286 break; 2287 2288 case 150: /* stwcx. */ 2289 op->type = MKOP(STCX, 0, 4); 2290 break; 2291 2292 #ifdef __powerpc64__ 2293 case 84: /* ldarx */ 2294 op->type = MKOP(LARX, 0, 8); 2295 break; 2296 2297 case 214: /* stdcx. */ 2298 op->type = MKOP(STCX, 0, 8); 2299 break; 2300 2301 case 52: /* lbarx */ 2302 op->type = MKOP(LARX, 0, 1); 2303 break; 2304 2305 case 694: /* stbcx. */ 2306 op->type = MKOP(STCX, 0, 1); 2307 break; 2308 2309 case 116: /* lharx */ 2310 op->type = MKOP(LARX, 0, 2); 2311 break; 2312 2313 case 726: /* sthcx. */ 2314 op->type = MKOP(STCX, 0, 2); 2315 break; 2316 2317 case 276: /* lqarx */ 2318 if (!((rd & 1) || rd == ra || rd == rb)) 2319 op->type = MKOP(LARX, 0, 16); 2320 break; 2321 2322 case 182: /* stqcx. */ 2323 if (!(rd & 1)) 2324 op->type = MKOP(STCX, 0, 16); 2325 break; 2326 #endif 2327 2328 case 23: /* lwzx */ 2329 case 55: /* lwzux */ 2330 op->type = MKOP(LOAD, u, 4); 2331 break; 2332 2333 case 87: /* lbzx */ 2334 case 119: /* lbzux */ 2335 op->type = MKOP(LOAD, u, 1); 2336 break; 2337 2338 #ifdef CONFIG_ALTIVEC 2339 /* 2340 * Note: for the load/store vector element instructions, 2341 * bits of the EA say which field of the VMX register to use. 2342 */ 2343 case 7: /* lvebx */ 2344 op->type = MKOP(LOAD_VMX, 0, 1); 2345 op->element_size = 1; 2346 break; 2347 2348 case 39: /* lvehx */ 2349 op->type = MKOP(LOAD_VMX, 0, 2); 2350 op->element_size = 2; 2351 break; 2352 2353 case 71: /* lvewx */ 2354 op->type = MKOP(LOAD_VMX, 0, 4); 2355 op->element_size = 4; 2356 break; 2357 2358 case 103: /* lvx */ 2359 case 359: /* lvxl */ 2360 op->type = MKOP(LOAD_VMX, 0, 16); 2361 op->element_size = 16; 2362 break; 2363 2364 case 135: /* stvebx */ 2365 op->type = MKOP(STORE_VMX, 0, 1); 2366 op->element_size = 1; 2367 break; 2368 2369 case 167: /* stvehx */ 2370 op->type = MKOP(STORE_VMX, 0, 2); 2371 op->element_size = 2; 2372 break; 2373 2374 case 199: /* stvewx */ 2375 op->type = MKOP(STORE_VMX, 0, 4); 2376 op->element_size = 4; 2377 break; 2378 2379 case 231: /* stvx */ 2380 case 487: /* stvxl */ 2381 op->type = MKOP(STORE_VMX, 0, 16); 2382 break; 2383 #endif /* CONFIG_ALTIVEC */ 2384 2385 #ifdef __powerpc64__ 2386 case 21: /* ldx */ 2387 case 53: /* ldux */ 2388 op->type = MKOP(LOAD, u, 8); 2389 break; 2390 2391 case 149: /* stdx */ 2392 case 181: /* stdux */ 2393 op->type = MKOP(STORE, u, 8); 2394 break; 2395 #endif 2396 2397 case 151: /* stwx */ 2398 case 183: /* stwux */ 2399 op->type = MKOP(STORE, u, 4); 2400 break; 2401 2402 case 215: /* stbx */ 2403 case 247: /* stbux */ 2404 op->type = MKOP(STORE, u, 1); 2405 break; 2406 2407 case 279: /* lhzx */ 2408 case 311: /* lhzux */ 2409 op->type = MKOP(LOAD, u, 2); 2410 break; 2411 2412 #ifdef __powerpc64__ 2413 case 341: /* lwax */ 2414 case 373: /* lwaux */ 2415 op->type = MKOP(LOAD, SIGNEXT | u, 4); 2416 break; 2417 #endif 2418 2419 case 343: /* lhax */ 2420 case 375: /* lhaux */ 2421 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2422 break; 2423 2424 case 407: /* sthx */ 2425 case 439: /* sthux */ 2426 op->type = MKOP(STORE, u, 2); 2427 break; 2428 2429 #ifdef __powerpc64__ 2430 case 532: /* ldbrx */ 2431 op->type = MKOP(LOAD, BYTEREV, 8); 2432 break; 2433 2434 #endif 2435 case 533: /* lswx */ 2436 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f); 2437 break; 2438 2439 case 534: /* lwbrx */ 2440 op->type = MKOP(LOAD, BYTEREV, 4); 2441 break; 2442 2443 case 597: /* lswi */ 2444 if (rb == 0) 2445 rb = 32; /* # bytes to load */ 2446 op->type = MKOP(LOAD_MULTI, 0, rb); 2447 op->ea = ra ? regs->gpr[ra] : 0; 2448 break; 2449 2450 #ifdef CONFIG_PPC_FPU 2451 case 535: /* lfsx */ 2452 case 567: /* lfsux */ 2453 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2454 break; 2455 2456 case 599: /* lfdx */ 2457 case 631: /* lfdux */ 2458 op->type = MKOP(LOAD_FP, u, 8); 2459 break; 2460 2461 case 663: /* stfsx */ 2462 case 695: /* stfsux */ 2463 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2464 break; 2465 2466 case 727: /* stfdx */ 2467 case 759: /* stfdux */ 2468 op->type = MKOP(STORE_FP, u, 8); 2469 break; 2470 2471 #ifdef __powerpc64__ 2472 case 791: /* lfdpx */ 2473 op->type = MKOP(LOAD_FP, 0, 16); 2474 break; 2475 2476 case 855: /* lfiwax */ 2477 op->type = MKOP(LOAD_FP, SIGNEXT, 4); 2478 break; 2479 2480 case 887: /* lfiwzx */ 2481 op->type = MKOP(LOAD_FP, 0, 4); 2482 break; 2483 2484 case 919: /* stfdpx */ 2485 op->type = MKOP(STORE_FP, 0, 16); 2486 break; 2487 2488 case 983: /* stfiwx */ 2489 op->type = MKOP(STORE_FP, 0, 4); 2490 break; 2491 #endif /* __powerpc64 */ 2492 #endif /* CONFIG_PPC_FPU */ 2493 2494 #ifdef __powerpc64__ 2495 case 660: /* stdbrx */ 2496 op->type = MKOP(STORE, BYTEREV, 8); 2497 op->val = byterev_8(regs->gpr[rd]); 2498 break; 2499 2500 #endif 2501 case 661: /* stswx */ 2502 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); 2503 break; 2504 2505 case 662: /* stwbrx */ 2506 op->type = MKOP(STORE, BYTEREV, 4); 2507 op->val = byterev_4(regs->gpr[rd]); 2508 break; 2509 2510 case 725: /* stswi */ 2511 if (rb == 0) 2512 rb = 32; /* # bytes to store */ 2513 op->type = MKOP(STORE_MULTI, 0, rb); 2514 op->ea = ra ? regs->gpr[ra] : 0; 2515 break; 2516 2517 case 790: /* lhbrx */ 2518 op->type = MKOP(LOAD, BYTEREV, 2); 2519 break; 2520 2521 case 918: /* sthbrx */ 2522 op->type = MKOP(STORE, BYTEREV, 2); 2523 op->val = byterev_2(regs->gpr[rd]); 2524 break; 2525 2526 #ifdef CONFIG_VSX 2527 case 12: /* lxsiwzx */ 2528 op->reg = rd | ((word & 1) << 5); 2529 op->type = MKOP(LOAD_VSX, 0, 4); 2530 op->element_size = 8; 2531 break; 2532 2533 case 76: /* lxsiwax */ 2534 op->reg = rd | ((word & 1) << 5); 2535 op->type = MKOP(LOAD_VSX, SIGNEXT, 4); 2536 op->element_size = 8; 2537 break; 2538 2539 case 140: /* stxsiwx */ 2540 op->reg = rd | ((word & 1) << 5); 2541 op->type = MKOP(STORE_VSX, 0, 4); 2542 op->element_size = 8; 2543 break; 2544 2545 case 268: /* lxvx */ 2546 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2547 goto unknown_opcode; 2548 op->reg = rd | ((word & 1) << 5); 2549 op->type = MKOP(LOAD_VSX, 0, 16); 2550 op->element_size = 16; 2551 op->vsx_flags = VSX_CHECK_VEC; 2552 break; 2553 2554 case 269: /* lxvl */ 2555 case 301: { /* lxvll */ 2556 int nb; 2557 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2558 goto unknown_opcode; 2559 op->reg = rd | ((word & 1) << 5); 2560 op->ea = ra ? regs->gpr[ra] : 0; 2561 nb = regs->gpr[rb] & 0xff; 2562 if (nb > 16) 2563 nb = 16; 2564 op->type = MKOP(LOAD_VSX, 0, nb); 2565 op->element_size = 16; 2566 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2567 VSX_CHECK_VEC; 2568 break; 2569 } 2570 case 332: /* lxvdsx */ 2571 op->reg = rd | ((word & 1) << 5); 2572 op->type = MKOP(LOAD_VSX, 0, 8); 2573 op->element_size = 8; 2574 op->vsx_flags = VSX_SPLAT; 2575 break; 2576 2577 case 333: /* lxvpx */ 2578 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2579 goto unknown_opcode; 2580 op->reg = VSX_REGISTER_XTP(rd); 2581 op->type = MKOP(LOAD_VSX, 0, 32); 2582 op->element_size = 32; 2583 break; 2584 2585 case 364: /* lxvwsx */ 2586 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2587 goto unknown_opcode; 2588 op->reg = rd | ((word & 1) << 5); 2589 op->type = MKOP(LOAD_VSX, 0, 4); 2590 op->element_size = 4; 2591 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; 2592 break; 2593 2594 case 396: /* stxvx */ 2595 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2596 goto unknown_opcode; 2597 op->reg = rd | ((word & 1) << 5); 2598 op->type = MKOP(STORE_VSX, 0, 16); 2599 op->element_size = 16; 2600 op->vsx_flags = VSX_CHECK_VEC; 2601 break; 2602 2603 case 397: /* stxvl */ 2604 case 429: { /* stxvll */ 2605 int nb; 2606 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2607 goto unknown_opcode; 2608 op->reg = rd | ((word & 1) << 5); 2609 op->ea = ra ? regs->gpr[ra] : 0; 2610 nb = regs->gpr[rb] & 0xff; 2611 if (nb > 16) 2612 nb = 16; 2613 op->type = MKOP(STORE_VSX, 0, nb); 2614 op->element_size = 16; 2615 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2616 VSX_CHECK_VEC; 2617 break; 2618 } 2619 case 461: /* stxvpx */ 2620 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2621 goto unknown_opcode; 2622 op->reg = VSX_REGISTER_XTP(rd); 2623 op->type = MKOP(STORE_VSX, 0, 32); 2624 op->element_size = 32; 2625 break; 2626 case 524: /* lxsspx */ 2627 op->reg = rd | ((word & 1) << 5); 2628 op->type = MKOP(LOAD_VSX, 0, 4); 2629 op->element_size = 8; 2630 op->vsx_flags = VSX_FPCONV; 2631 break; 2632 2633 case 588: /* lxsdx */ 2634 op->reg = rd | ((word & 1) << 5); 2635 op->type = MKOP(LOAD_VSX, 0, 8); 2636 op->element_size = 8; 2637 break; 2638 2639 case 652: /* stxsspx */ 2640 op->reg = rd | ((word & 1) << 5); 2641 op->type = MKOP(STORE_VSX, 0, 4); 2642 op->element_size = 8; 2643 op->vsx_flags = VSX_FPCONV; 2644 break; 2645 2646 case 716: /* stxsdx */ 2647 op->reg = rd | ((word & 1) << 5); 2648 op->type = MKOP(STORE_VSX, 0, 8); 2649 op->element_size = 8; 2650 break; 2651 2652 case 780: /* lxvw4x */ 2653 op->reg = rd | ((word & 1) << 5); 2654 op->type = MKOP(LOAD_VSX, 0, 16); 2655 op->element_size = 4; 2656 break; 2657 2658 case 781: /* lxsibzx */ 2659 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2660 goto unknown_opcode; 2661 op->reg = rd | ((word & 1) << 5); 2662 op->type = MKOP(LOAD_VSX, 0, 1); 2663 op->element_size = 8; 2664 op->vsx_flags = VSX_CHECK_VEC; 2665 break; 2666 2667 case 812: /* lxvh8x */ 2668 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2669 goto unknown_opcode; 2670 op->reg = rd | ((word & 1) << 5); 2671 op->type = MKOP(LOAD_VSX, 0, 16); 2672 op->element_size = 2; 2673 op->vsx_flags = VSX_CHECK_VEC; 2674 break; 2675 2676 case 813: /* lxsihzx */ 2677 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2678 goto unknown_opcode; 2679 op->reg = rd | ((word & 1) << 5); 2680 op->type = MKOP(LOAD_VSX, 0, 2); 2681 op->element_size = 8; 2682 op->vsx_flags = VSX_CHECK_VEC; 2683 break; 2684 2685 case 844: /* lxvd2x */ 2686 op->reg = rd | ((word & 1) << 5); 2687 op->type = MKOP(LOAD_VSX, 0, 16); 2688 op->element_size = 8; 2689 break; 2690 2691 case 876: /* lxvb16x */ 2692 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2693 goto unknown_opcode; 2694 op->reg = rd | ((word & 1) << 5); 2695 op->type = MKOP(LOAD_VSX, 0, 16); 2696 op->element_size = 1; 2697 op->vsx_flags = VSX_CHECK_VEC; 2698 break; 2699 2700 case 908: /* stxvw4x */ 2701 op->reg = rd | ((word & 1) << 5); 2702 op->type = MKOP(STORE_VSX, 0, 16); 2703 op->element_size = 4; 2704 break; 2705 2706 case 909: /* stxsibx */ 2707 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2708 goto unknown_opcode; 2709 op->reg = rd | ((word & 1) << 5); 2710 op->type = MKOP(STORE_VSX, 0, 1); 2711 op->element_size = 8; 2712 op->vsx_flags = VSX_CHECK_VEC; 2713 break; 2714 2715 case 940: /* stxvh8x */ 2716 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2717 goto unknown_opcode; 2718 op->reg = rd | ((word & 1) << 5); 2719 op->type = MKOP(STORE_VSX, 0, 16); 2720 op->element_size = 2; 2721 op->vsx_flags = VSX_CHECK_VEC; 2722 break; 2723 2724 case 941: /* stxsihx */ 2725 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2726 goto unknown_opcode; 2727 op->reg = rd | ((word & 1) << 5); 2728 op->type = MKOP(STORE_VSX, 0, 2); 2729 op->element_size = 8; 2730 op->vsx_flags = VSX_CHECK_VEC; 2731 break; 2732 2733 case 972: /* stxvd2x */ 2734 op->reg = rd | ((word & 1) << 5); 2735 op->type = MKOP(STORE_VSX, 0, 16); 2736 op->element_size = 8; 2737 break; 2738 2739 case 1004: /* stxvb16x */ 2740 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2741 goto unknown_opcode; 2742 op->reg = rd | ((word & 1) << 5); 2743 op->type = MKOP(STORE_VSX, 0, 16); 2744 op->element_size = 1; 2745 op->vsx_flags = VSX_CHECK_VEC; 2746 break; 2747 2748 #endif /* CONFIG_VSX */ 2749 } 2750 break; 2751 2752 case 32: /* lwz */ 2753 case 33: /* lwzu */ 2754 op->type = MKOP(LOAD, u, 4); 2755 op->ea = dform_ea(word, regs); 2756 break; 2757 2758 case 34: /* lbz */ 2759 case 35: /* lbzu */ 2760 op->type = MKOP(LOAD, u, 1); 2761 op->ea = dform_ea(word, regs); 2762 break; 2763 2764 case 36: /* stw */ 2765 case 37: /* stwu */ 2766 op->type = MKOP(STORE, u, 4); 2767 op->ea = dform_ea(word, regs); 2768 break; 2769 2770 case 38: /* stb */ 2771 case 39: /* stbu */ 2772 op->type = MKOP(STORE, u, 1); 2773 op->ea = dform_ea(word, regs); 2774 break; 2775 2776 case 40: /* lhz */ 2777 case 41: /* lhzu */ 2778 op->type = MKOP(LOAD, u, 2); 2779 op->ea = dform_ea(word, regs); 2780 break; 2781 2782 case 42: /* lha */ 2783 case 43: /* lhau */ 2784 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2785 op->ea = dform_ea(word, regs); 2786 break; 2787 2788 case 44: /* sth */ 2789 case 45: /* sthu */ 2790 op->type = MKOP(STORE, u, 2); 2791 op->ea = dform_ea(word, regs); 2792 break; 2793 2794 case 46: /* lmw */ 2795 if (ra >= rd) 2796 break; /* invalid form, ra in range to load */ 2797 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); 2798 op->ea = dform_ea(word, regs); 2799 break; 2800 2801 case 47: /* stmw */ 2802 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); 2803 op->ea = dform_ea(word, regs); 2804 break; 2805 2806 #ifdef CONFIG_PPC_FPU 2807 case 48: /* lfs */ 2808 case 49: /* lfsu */ 2809 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2810 op->ea = dform_ea(word, regs); 2811 break; 2812 2813 case 50: /* lfd */ 2814 case 51: /* lfdu */ 2815 op->type = MKOP(LOAD_FP, u, 8); 2816 op->ea = dform_ea(word, regs); 2817 break; 2818 2819 case 52: /* stfs */ 2820 case 53: /* stfsu */ 2821 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2822 op->ea = dform_ea(word, regs); 2823 break; 2824 2825 case 54: /* stfd */ 2826 case 55: /* stfdu */ 2827 op->type = MKOP(STORE_FP, u, 8); 2828 op->ea = dform_ea(word, regs); 2829 break; 2830 #endif 2831 2832 #ifdef __powerpc64__ 2833 case 56: /* lq */ 2834 if (!((rd & 1) || (rd == ra))) 2835 op->type = MKOP(LOAD, 0, 16); 2836 op->ea = dqform_ea(word, regs); 2837 break; 2838 #endif 2839 2840 #ifdef CONFIG_VSX 2841 case 57: /* lfdp, lxsd, lxssp */ 2842 op->ea = dsform_ea(word, regs); 2843 switch (word & 3) { 2844 case 0: /* lfdp */ 2845 if (rd & 1) 2846 break; /* reg must be even */ 2847 op->type = MKOP(LOAD_FP, 0, 16); 2848 break; 2849 case 2: /* lxsd */ 2850 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2851 goto unknown_opcode; 2852 op->reg = rd + 32; 2853 op->type = MKOP(LOAD_VSX, 0, 8); 2854 op->element_size = 8; 2855 op->vsx_flags = VSX_CHECK_VEC; 2856 break; 2857 case 3: /* lxssp */ 2858 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2859 goto unknown_opcode; 2860 op->reg = rd + 32; 2861 op->type = MKOP(LOAD_VSX, 0, 4); 2862 op->element_size = 8; 2863 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2864 break; 2865 } 2866 break; 2867 #endif /* CONFIG_VSX */ 2868 2869 #ifdef __powerpc64__ 2870 case 58: /* ld[u], lwa */ 2871 op->ea = dsform_ea(word, regs); 2872 switch (word & 3) { 2873 case 0: /* ld */ 2874 op->type = MKOP(LOAD, 0, 8); 2875 break; 2876 case 1: /* ldu */ 2877 op->type = MKOP(LOAD, UPDATE, 8); 2878 break; 2879 case 2: /* lwa */ 2880 op->type = MKOP(LOAD, SIGNEXT, 4); 2881 break; 2882 } 2883 break; 2884 #endif 2885 2886 #ifdef CONFIG_VSX 2887 case 6: 2888 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2889 goto unknown_opcode; 2890 op->ea = dqform_ea(word, regs); 2891 op->reg = VSX_REGISTER_XTP(rd); 2892 op->element_size = 32; 2893 switch (word & 0xf) { 2894 case 0: /* lxvp */ 2895 op->type = MKOP(LOAD_VSX, 0, 32); 2896 break; 2897 case 1: /* stxvp */ 2898 op->type = MKOP(STORE_VSX, 0, 32); 2899 break; 2900 } 2901 break; 2902 2903 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ 2904 switch (word & 7) { 2905 case 0: /* stfdp with LSB of DS field = 0 */ 2906 case 4: /* stfdp with LSB of DS field = 1 */ 2907 op->ea = dsform_ea(word, regs); 2908 op->type = MKOP(STORE_FP, 0, 16); 2909 break; 2910 2911 case 1: /* lxv */ 2912 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2913 goto unknown_opcode; 2914 op->ea = dqform_ea(word, regs); 2915 if (word & 8) 2916 op->reg = rd + 32; 2917 op->type = MKOP(LOAD_VSX, 0, 16); 2918 op->element_size = 16; 2919 op->vsx_flags = VSX_CHECK_VEC; 2920 break; 2921 2922 case 2: /* stxsd with LSB of DS field = 0 */ 2923 case 6: /* stxsd with LSB of DS field = 1 */ 2924 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2925 goto unknown_opcode; 2926 op->ea = dsform_ea(word, regs); 2927 op->reg = rd + 32; 2928 op->type = MKOP(STORE_VSX, 0, 8); 2929 op->element_size = 8; 2930 op->vsx_flags = VSX_CHECK_VEC; 2931 break; 2932 2933 case 3: /* stxssp with LSB of DS field = 0 */ 2934 case 7: /* stxssp with LSB of DS field = 1 */ 2935 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2936 goto unknown_opcode; 2937 op->ea = dsform_ea(word, regs); 2938 op->reg = rd + 32; 2939 op->type = MKOP(STORE_VSX, 0, 4); 2940 op->element_size = 8; 2941 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2942 break; 2943 2944 case 5: /* stxv */ 2945 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2946 goto unknown_opcode; 2947 op->ea = dqform_ea(word, regs); 2948 if (word & 8) 2949 op->reg = rd + 32; 2950 op->type = MKOP(STORE_VSX, 0, 16); 2951 op->element_size = 16; 2952 op->vsx_flags = VSX_CHECK_VEC; 2953 break; 2954 } 2955 break; 2956 #endif /* CONFIG_VSX */ 2957 2958 #ifdef __powerpc64__ 2959 case 62: /* std[u] */ 2960 op->ea = dsform_ea(word, regs); 2961 switch (word & 3) { 2962 case 0: /* std */ 2963 op->type = MKOP(STORE, 0, 8); 2964 break; 2965 case 1: /* stdu */ 2966 op->type = MKOP(STORE, UPDATE, 8); 2967 break; 2968 case 2: /* stq */ 2969 if (!(rd & 1)) 2970 op->type = MKOP(STORE, 0, 16); 2971 break; 2972 } 2973 break; 2974 case 1: /* Prefixed instructions */ 2975 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2976 goto unknown_opcode; 2977 2978 prefix_r = GET_PREFIX_R(word); 2979 ra = GET_PREFIX_RA(suffix); 2980 op->update_reg = ra; 2981 rd = (suffix >> 21) & 0x1f; 2982 op->reg = rd; 2983 op->val = regs->gpr[rd]; 2984 2985 suffixopcode = get_op(suffix); 2986 prefixtype = (word >> 24) & 0x3; 2987 switch (prefixtype) { 2988 case 0: /* Type 00 Eight-Byte Load/Store */ 2989 if (prefix_r && ra) 2990 break; 2991 op->ea = mlsd_8lsd_ea(word, suffix, regs); 2992 switch (suffixopcode) { 2993 case 41: /* plwa */ 2994 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); 2995 break; 2996 #ifdef CONFIG_VSX 2997 case 42: /* plxsd */ 2998 op->reg = rd + 32; 2999 op->type = MKOP(LOAD_VSX, PREFIXED, 8); 3000 op->element_size = 8; 3001 op->vsx_flags = VSX_CHECK_VEC; 3002 break; 3003 case 43: /* plxssp */ 3004 op->reg = rd + 32; 3005 op->type = MKOP(LOAD_VSX, PREFIXED, 4); 3006 op->element_size = 8; 3007 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3008 break; 3009 case 46: /* pstxsd */ 3010 op->reg = rd + 32; 3011 op->type = MKOP(STORE_VSX, PREFIXED, 8); 3012 op->element_size = 8; 3013 op->vsx_flags = VSX_CHECK_VEC; 3014 break; 3015 case 47: /* pstxssp */ 3016 op->reg = rd + 32; 3017 op->type = MKOP(STORE_VSX, PREFIXED, 4); 3018 op->element_size = 8; 3019 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 3020 break; 3021 case 51: /* plxv1 */ 3022 op->reg += 32; 3023 fallthrough; 3024 case 50: /* plxv0 */ 3025 op->type = MKOP(LOAD_VSX, PREFIXED, 16); 3026 op->element_size = 16; 3027 op->vsx_flags = VSX_CHECK_VEC; 3028 break; 3029 case 55: /* pstxv1 */ 3030 op->reg = rd + 32; 3031 fallthrough; 3032 case 54: /* pstxv0 */ 3033 op->type = MKOP(STORE_VSX, PREFIXED, 16); 3034 op->element_size = 16; 3035 op->vsx_flags = VSX_CHECK_VEC; 3036 break; 3037 #endif /* CONFIG_VSX */ 3038 case 56: /* plq */ 3039 op->type = MKOP(LOAD, PREFIXED, 16); 3040 break; 3041 case 57: /* pld */ 3042 op->type = MKOP(LOAD, PREFIXED, 8); 3043 break; 3044 #ifdef CONFIG_VSX 3045 case 58: /* plxvp */ 3046 op->reg = VSX_REGISTER_XTP(rd); 3047 op->type = MKOP(LOAD_VSX, PREFIXED, 32); 3048 op->element_size = 32; 3049 break; 3050 #endif /* CONFIG_VSX */ 3051 case 60: /* pstq */ 3052 op->type = MKOP(STORE, PREFIXED, 16); 3053 break; 3054 case 61: /* pstd */ 3055 op->type = MKOP(STORE, PREFIXED, 8); 3056 break; 3057 #ifdef CONFIG_VSX 3058 case 62: /* pstxvp */ 3059 op->reg = VSX_REGISTER_XTP(rd); 3060 op->type = MKOP(STORE_VSX, PREFIXED, 32); 3061 op->element_size = 32; 3062 break; 3063 #endif /* CONFIG_VSX */ 3064 } 3065 break; 3066 case 1: /* Type 01 Eight-Byte Register-to-Register */ 3067 break; 3068 case 2: /* Type 10 Modified Load/Store */ 3069 if (prefix_r && ra) 3070 break; 3071 op->ea = mlsd_8lsd_ea(word, suffix, regs); 3072 switch (suffixopcode) { 3073 case 32: /* plwz */ 3074 op->type = MKOP(LOAD, PREFIXED, 4); 3075 break; 3076 case 34: /* plbz */ 3077 op->type = MKOP(LOAD, PREFIXED, 1); 3078 break; 3079 case 36: /* pstw */ 3080 op->type = MKOP(STORE, PREFIXED, 4); 3081 break; 3082 case 38: /* pstb */ 3083 op->type = MKOP(STORE, PREFIXED, 1); 3084 break; 3085 case 40: /* plhz */ 3086 op->type = MKOP(LOAD, PREFIXED, 2); 3087 break; 3088 case 42: /* plha */ 3089 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); 3090 break; 3091 case 44: /* psth */ 3092 op->type = MKOP(STORE, PREFIXED, 2); 3093 break; 3094 case 48: /* plfs */ 3095 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); 3096 break; 3097 case 50: /* plfd */ 3098 op->type = MKOP(LOAD_FP, PREFIXED, 8); 3099 break; 3100 case 52: /* pstfs */ 3101 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); 3102 break; 3103 case 54: /* pstfd */ 3104 op->type = MKOP(STORE_FP, PREFIXED, 8); 3105 break; 3106 } 3107 break; 3108 case 3: /* Type 11 Modified Register-to-Register */ 3109 break; 3110 } 3111 #endif /* __powerpc64__ */ 3112 3113 } 3114 3115 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { 3116 switch (GETTYPE(op->type)) { 3117 case LOAD: 3118 if (ra == rd) 3119 goto unknown_opcode; 3120 fallthrough; 3121 case STORE: 3122 case LOAD_FP: 3123 case STORE_FP: 3124 if (ra == 0) 3125 goto unknown_opcode; 3126 } 3127 } 3128 3129 #ifdef CONFIG_VSX 3130 if ((GETTYPE(op->type) == LOAD_VSX || 3131 GETTYPE(op->type) == STORE_VSX) && 3132 !cpu_has_feature(CPU_FTR_VSX)) { 3133 return -1; 3134 } 3135 #endif /* CONFIG_VSX */ 3136 3137 return 0; 3138 3139 unknown_opcode: 3140 op->type = UNKNOWN; 3141 return 0; 3142 3143 logical_done: 3144 if (word & 1) 3145 set_cr0(regs, op); 3146 logical_done_nocc: 3147 op->reg = ra; 3148 op->type |= SETREG; 3149 return 1; 3150 3151 arith_done: 3152 if (word & 1) 3153 set_cr0(regs, op); 3154 compute_done: 3155 op->reg = rd; 3156 op->type |= SETREG; 3157 return 1; 3158 3159 priv: 3160 op->type = INTERRUPT | 0x700; 3161 op->val = SRR1_PROGPRIV; 3162 return 0; 3163 3164 trap: 3165 op->type = INTERRUPT | 0x700; 3166 op->val = SRR1_PROGTRAP; 3167 return 0; 3168 } 3169 EXPORT_SYMBOL_GPL(analyse_instr); 3170 NOKPROBE_SYMBOL(analyse_instr); 3171 3172 /* 3173 * For PPC32 we always use stwu with r1 to change the stack pointer. 3174 * So this emulated store may corrupt the exception frame, now we 3175 * have to provide the exception frame trampoline, which is pushed 3176 * below the kprobed function stack. So we only update gpr[1] but 3177 * don't emulate the real store operation. We will do real store 3178 * operation safely in exception return code by checking this flag. 3179 */ 3180 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) 3181 { 3182 /* 3183 * Check if we already set since that means we'll 3184 * lose the previous value. 3185 */ 3186 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); 3187 set_thread_flag(TIF_EMULATE_STACK_STORE); 3188 return 0; 3189 } 3190 3191 static nokprobe_inline void do_signext(unsigned long *valp, int size) 3192 { 3193 switch (size) { 3194 case 2: 3195 *valp = (signed short) *valp; 3196 break; 3197 case 4: 3198 *valp = (signed int) *valp; 3199 break; 3200 } 3201 } 3202 3203 static nokprobe_inline void do_byterev(unsigned long *valp, int size) 3204 { 3205 switch (size) { 3206 case 2: 3207 *valp = byterev_2(*valp); 3208 break; 3209 case 4: 3210 *valp = byterev_4(*valp); 3211 break; 3212 #ifdef __powerpc64__ 3213 case 8: 3214 *valp = byterev_8(*valp); 3215 break; 3216 #endif 3217 } 3218 } 3219 3220 /* 3221 * Emulate an instruction that can be executed just by updating 3222 * fields in *regs. 3223 */ 3224 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) 3225 { 3226 unsigned long next_pc; 3227 3228 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); 3229 switch (GETTYPE(op->type)) { 3230 case COMPUTE: 3231 if (op->type & SETREG) 3232 regs->gpr[op->reg] = op->val; 3233 if (op->type & SETCC) 3234 regs->ccr = op->ccval; 3235 if (op->type & SETXER) 3236 regs->xer = op->xerval; 3237 break; 3238 3239 case BRANCH: 3240 if (op->type & SETLK) 3241 regs->link = next_pc; 3242 if (op->type & BRTAKEN) 3243 next_pc = op->val; 3244 if (op->type & DECCTR) 3245 --regs->ctr; 3246 break; 3247 3248 case BARRIER: 3249 switch (op->type & BARRIER_MASK) { 3250 case BARRIER_SYNC: 3251 mb(); 3252 break; 3253 case BARRIER_ISYNC: 3254 isync(); 3255 break; 3256 case BARRIER_EIEIO: 3257 eieio(); 3258 break; 3259 #ifdef CONFIG_PPC64 3260 case BARRIER_LWSYNC: 3261 asm volatile("lwsync" : : : "memory"); 3262 break; 3263 case BARRIER_PTESYNC: 3264 asm volatile("ptesync" : : : "memory"); 3265 break; 3266 #endif 3267 } 3268 break; 3269 3270 case MFSPR: 3271 switch (op->spr) { 3272 case SPRN_XER: 3273 regs->gpr[op->reg] = regs->xer & 0xffffffffUL; 3274 break; 3275 case SPRN_LR: 3276 regs->gpr[op->reg] = regs->link; 3277 break; 3278 case SPRN_CTR: 3279 regs->gpr[op->reg] = regs->ctr; 3280 break; 3281 default: 3282 WARN_ON_ONCE(1); 3283 } 3284 break; 3285 3286 case MTSPR: 3287 switch (op->spr) { 3288 case SPRN_XER: 3289 regs->xer = op->val & 0xffffffffUL; 3290 break; 3291 case SPRN_LR: 3292 regs->link = op->val; 3293 break; 3294 case SPRN_CTR: 3295 regs->ctr = op->val; 3296 break; 3297 default: 3298 WARN_ON_ONCE(1); 3299 } 3300 break; 3301 3302 default: 3303 WARN_ON_ONCE(1); 3304 } 3305 regs_set_return_ip(regs, next_pc); 3306 } 3307 NOKPROBE_SYMBOL(emulate_update_regs); 3308 3309 /* 3310 * Emulate a previously-analysed load or store instruction. 3311 * Return values are: 3312 * 0 = instruction emulated successfully 3313 * -EFAULT = address out of range or access faulted (regs->dar 3314 * contains the faulting address) 3315 * -EACCES = misaligned access, instruction requires alignment 3316 * -EINVAL = unknown operation in *op 3317 */ 3318 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) 3319 { 3320 int err, size, type; 3321 int i, rd, nb; 3322 unsigned int cr; 3323 unsigned long val; 3324 unsigned long ea; 3325 bool cross_endian; 3326 3327 err = 0; 3328 size = GETSIZE(op->type); 3329 type = GETTYPE(op->type); 3330 cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 3331 ea = truncate_if_32bit(regs->msr, op->ea); 3332 3333 switch (type) { 3334 case LARX: 3335 if (ea & (size - 1)) 3336 return -EACCES; /* can't handle misaligned */ 3337 if (!address_ok(regs, ea, size)) 3338 return -EFAULT; 3339 err = 0; 3340 val = 0; 3341 switch (size) { 3342 #ifdef __powerpc64__ 3343 case 1: 3344 __get_user_asmx(val, ea, err, "lbarx"); 3345 break; 3346 case 2: 3347 __get_user_asmx(val, ea, err, "lharx"); 3348 break; 3349 #endif 3350 case 4: 3351 __get_user_asmx(val, ea, err, "lwarx"); 3352 break; 3353 #ifdef __powerpc64__ 3354 case 8: 3355 __get_user_asmx(val, ea, err, "ldarx"); 3356 break; 3357 case 16: 3358 err = do_lqarx(ea, ®s->gpr[op->reg]); 3359 break; 3360 #endif 3361 default: 3362 return -EINVAL; 3363 } 3364 if (err) { 3365 regs->dar = ea; 3366 break; 3367 } 3368 if (size < 16) 3369 regs->gpr[op->reg] = val; 3370 break; 3371 3372 case STCX: 3373 if (ea & (size - 1)) 3374 return -EACCES; /* can't handle misaligned */ 3375 if (!address_ok(regs, ea, size)) 3376 return -EFAULT; 3377 err = 0; 3378 switch (size) { 3379 #ifdef __powerpc64__ 3380 case 1: 3381 __put_user_asmx(op->val, ea, err, "stbcx.", cr); 3382 break; 3383 case 2: 3384 __put_user_asmx(op->val, ea, err, "sthcx.", cr); 3385 break; 3386 #endif 3387 case 4: 3388 __put_user_asmx(op->val, ea, err, "stwcx.", cr); 3389 break; 3390 #ifdef __powerpc64__ 3391 case 8: 3392 __put_user_asmx(op->val, ea, err, "stdcx.", cr); 3393 break; 3394 case 16: 3395 err = do_stqcx(ea, regs->gpr[op->reg], 3396 regs->gpr[op->reg + 1], &cr); 3397 break; 3398 #endif 3399 default: 3400 return -EINVAL; 3401 } 3402 if (!err) 3403 regs->ccr = (regs->ccr & 0x0fffffff) | 3404 (cr & 0xe0000000) | 3405 ((regs->xer >> 3) & 0x10000000); 3406 else 3407 regs->dar = ea; 3408 break; 3409 3410 case LOAD: 3411 #ifdef __powerpc64__ 3412 if (size == 16) { 3413 err = emulate_lq(regs, ea, op->reg, cross_endian); 3414 break; 3415 } 3416 #endif 3417 err = read_mem(®s->gpr[op->reg], ea, size, regs); 3418 if (!err) { 3419 if (op->type & SIGNEXT) 3420 do_signext(®s->gpr[op->reg], size); 3421 if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV)) 3422 do_byterev(®s->gpr[op->reg], size); 3423 } 3424 break; 3425 3426 #ifdef CONFIG_PPC_FPU 3427 case LOAD_FP: 3428 /* 3429 * If the instruction is in userspace, we can emulate it even 3430 * if the VMX state is not live, because we have the state 3431 * stored in the thread_struct. If the instruction is in 3432 * the kernel, we must not touch the state in the thread_struct. 3433 */ 3434 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3435 return 0; 3436 err = do_fp_load(op, ea, regs, cross_endian); 3437 break; 3438 #endif 3439 #ifdef CONFIG_ALTIVEC 3440 case LOAD_VMX: 3441 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3442 return 0; 3443 err = do_vec_load(op->reg, ea, size, regs, cross_endian); 3444 break; 3445 #endif 3446 #ifdef CONFIG_VSX 3447 case LOAD_VSX: { 3448 unsigned long msrbit = MSR_VSX; 3449 3450 /* 3451 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3452 * when the target of the instruction is a vector register. 3453 */ 3454 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3455 msrbit = MSR_VEC; 3456 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3457 return 0; 3458 err = do_vsx_load(op, ea, regs, cross_endian); 3459 break; 3460 } 3461 #endif 3462 case LOAD_MULTI: 3463 if (!address_ok(regs, ea, size)) 3464 return -EFAULT; 3465 rd = op->reg; 3466 for (i = 0; i < size; i += 4) { 3467 unsigned int v32 = 0; 3468 3469 nb = size - i; 3470 if (nb > 4) 3471 nb = 4; 3472 err = copy_mem_in((u8 *) &v32, ea, nb, regs); 3473 if (err) 3474 break; 3475 if (unlikely(cross_endian)) 3476 v32 = byterev_4(v32); 3477 regs->gpr[rd] = v32; 3478 ea += 4; 3479 /* reg number wraps from 31 to 0 for lsw[ix] */ 3480 rd = (rd + 1) & 0x1f; 3481 } 3482 break; 3483 3484 case STORE: 3485 #ifdef __powerpc64__ 3486 if (size == 16) { 3487 err = emulate_stq(regs, ea, op->reg, cross_endian); 3488 break; 3489 } 3490 #endif 3491 if ((op->type & UPDATE) && size == sizeof(long) && 3492 op->reg == 1 && op->update_reg == 1 && 3493 !(regs->msr & MSR_PR) && 3494 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { 3495 err = handle_stack_update(ea, regs); 3496 break; 3497 } 3498 if (unlikely(cross_endian)) 3499 do_byterev(&op->val, size); 3500 err = write_mem(op->val, ea, size, regs); 3501 break; 3502 3503 #ifdef CONFIG_PPC_FPU 3504 case STORE_FP: 3505 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3506 return 0; 3507 err = do_fp_store(op, ea, regs, cross_endian); 3508 break; 3509 #endif 3510 #ifdef CONFIG_ALTIVEC 3511 case STORE_VMX: 3512 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3513 return 0; 3514 err = do_vec_store(op->reg, ea, size, regs, cross_endian); 3515 break; 3516 #endif 3517 #ifdef CONFIG_VSX 3518 case STORE_VSX: { 3519 unsigned long msrbit = MSR_VSX; 3520 3521 /* 3522 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3523 * when the target of the instruction is a vector register. 3524 */ 3525 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3526 msrbit = MSR_VEC; 3527 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3528 return 0; 3529 err = do_vsx_store(op, ea, regs, cross_endian); 3530 break; 3531 } 3532 #endif 3533 case STORE_MULTI: 3534 if (!address_ok(regs, ea, size)) 3535 return -EFAULT; 3536 rd = op->reg; 3537 for (i = 0; i < size; i += 4) { 3538 unsigned int v32 = regs->gpr[rd]; 3539 3540 nb = size - i; 3541 if (nb > 4) 3542 nb = 4; 3543 if (unlikely(cross_endian)) 3544 v32 = byterev_4(v32); 3545 err = copy_mem_out((u8 *) &v32, ea, nb, regs); 3546 if (err) 3547 break; 3548 ea += 4; 3549 /* reg number wraps from 31 to 0 for stsw[ix] */ 3550 rd = (rd + 1) & 0x1f; 3551 } 3552 break; 3553 3554 default: 3555 return -EINVAL; 3556 } 3557 3558 if (err) 3559 return err; 3560 3561 if (op->type & UPDATE) 3562 regs->gpr[op->update_reg] = op->ea; 3563 3564 return 0; 3565 } 3566 NOKPROBE_SYMBOL(emulate_loadstore); 3567 3568 /* 3569 * Emulate instructions that cause a transfer of control, 3570 * loads and stores, and a few other instructions. 3571 * Returns 1 if the step was emulated, 0 if not, 3572 * or -1 if the instruction is one that should not be stepped, 3573 * such as an rfid, or a mtmsrd that would clear MSR_RI. 3574 */ 3575 int emulate_step(struct pt_regs *regs, ppc_inst_t instr) 3576 { 3577 struct instruction_op op; 3578 int r, err, type; 3579 unsigned long val; 3580 unsigned long ea; 3581 3582 r = analyse_instr(&op, regs, instr); 3583 if (r < 0) 3584 return r; 3585 if (r > 0) { 3586 emulate_update_regs(regs, &op); 3587 return 1; 3588 } 3589 3590 err = 0; 3591 type = GETTYPE(op.type); 3592 3593 if (OP_IS_LOAD_STORE(type)) { 3594 err = emulate_loadstore(regs, &op); 3595 if (err) 3596 return 0; 3597 goto instr_done; 3598 } 3599 3600 switch (type) { 3601 case CACHEOP: 3602 ea = truncate_if_32bit(regs->msr, op.ea); 3603 if (!address_ok(regs, ea, 8)) 3604 return 0; 3605 switch (op.type & CACHEOP_MASK) { 3606 case DCBST: 3607 __cacheop_user_asmx(ea, err, "dcbst"); 3608 break; 3609 case DCBF: 3610 __cacheop_user_asmx(ea, err, "dcbf"); 3611 break; 3612 case DCBTST: 3613 if (op.reg == 0) 3614 prefetchw((void *) ea); 3615 break; 3616 case DCBT: 3617 if (op.reg == 0) 3618 prefetch((void *) ea); 3619 break; 3620 case ICBI: 3621 __cacheop_user_asmx(ea, err, "icbi"); 3622 break; 3623 case DCBZ: 3624 err = emulate_dcbz(ea, regs); 3625 break; 3626 } 3627 if (err) { 3628 regs->dar = ea; 3629 return 0; 3630 } 3631 goto instr_done; 3632 3633 case MFMSR: 3634 regs->gpr[op.reg] = regs->msr & MSR_MASK; 3635 goto instr_done; 3636 3637 case MTMSR: 3638 val = regs->gpr[op.reg]; 3639 if ((val & MSR_RI) == 0) 3640 /* can't step mtmsr[d] that would clear MSR_RI */ 3641 return -1; 3642 /* here op.val is the mask of bits to change */ 3643 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); 3644 goto instr_done; 3645 3646 #ifdef CONFIG_PPC64 3647 case SYSCALL: /* sc */ 3648 /* 3649 * N.B. this uses knowledge about how the syscall 3650 * entry code works. If that is changed, this will 3651 * need to be changed also. 3652 */ 3653 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && 3654 cpu_has_feature(CPU_FTR_REAL_LE) && 3655 regs->gpr[0] == 0x1ebe) { 3656 regs_set_return_msr(regs, regs->msr ^ MSR_LE); 3657 goto instr_done; 3658 } 3659 regs->gpr[9] = regs->gpr[13]; 3660 regs->gpr[10] = MSR_KERNEL; 3661 regs->gpr[11] = regs->nip + 4; 3662 regs->gpr[12] = regs->msr & MSR_MASK; 3663 regs->gpr[13] = (unsigned long) get_paca(); 3664 regs_set_return_ip(regs, (unsigned long) &system_call_common); 3665 regs_set_return_msr(regs, MSR_KERNEL); 3666 return 1; 3667 3668 #ifdef CONFIG_PPC_BOOK3S_64 3669 case SYSCALL_VECTORED_0: /* scv 0 */ 3670 regs->gpr[9] = regs->gpr[13]; 3671 regs->gpr[10] = MSR_KERNEL; 3672 regs->gpr[11] = regs->nip + 4; 3673 regs->gpr[12] = regs->msr & MSR_MASK; 3674 regs->gpr[13] = (unsigned long) get_paca(); 3675 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate); 3676 regs_set_return_msr(regs, MSR_KERNEL); 3677 return 1; 3678 #endif 3679 3680 case RFI: 3681 return -1; 3682 #endif 3683 } 3684 return 0; 3685 3686 instr_done: 3687 regs_set_return_ip(regs, 3688 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); 3689 return 1; 3690 } 3691 NOKPROBE_SYMBOL(emulate_step); 3692