// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
#include <asm/disassemble.h>

extern char system_call_common[];
extern char system_call_vectored_emulate[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#define XER_OV32	0x00080000U
#define XER_CA32	0x00040000U

#ifdef CONFIG_VSX
#define VSX_REGISTER_XTP(rd)	((((rd) & 1) << 5) | ((rd) & 0xfe))
#endif

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif

#ifdef __powerpc64__
/*
 * Functions in quad.S
 */
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
		    unsigned int *crp);
#endif

#ifdef __LITTLE_ENDIAN__
#define IS_LE	1
#define IS_BE	0
#else
#define IS_LE	0
#define IS_BE	1
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
							unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
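 * The BO field of the instruction selects whether the CTR is decremented
 * and tested and whether a CR bit is tested; DECCTR is ORed into op->type
 * when the branch implies a CTR decrement.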
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb))
		return 1;
	if (__access_ok(ea, 1))
		/* Access overlaps the end of the user region */
		regs->dar = TASK_SIZE_MAX - 1;
	else
		regs->dar = ea;
	return 0;
}

/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a DQ-form instruction
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~0xf);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}
#endif /* __powerpc64 */

/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return ea;
}

/*
 * Calculate effective address for a MLS:D-form / 8LS:D-form
 * prefixed instruction
 */
static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
						  unsigned int suffix,
						  const struct pt_regs *regs)
{
	int ra, prefix_r;
	unsigned int dd;
	unsigned long ea, d0, d1, d;

	prefix_r = GET_PREFIX_R(instr);
	ra = GET_PREFIX_RA(suffix);

	d0 = instr & 0x3ffff;
	d1 = suffix & 0xffff;
	d = (d0 << 16) | d1;

	/*
	 * sign extend a 34 bit number
	 */
	dd = (unsigned int)(d >> 2);
	ea = (signed int)dd;
	ea = (ea << 2) | (d & 0x3);

	if (!prefix_r && ra)
		ea += regs->gpr[ra];
	else if (!prefix_r && !ra)
		; /* Leave ea as is */
	else if (prefix_r)
		ea += regs->nip;

	/*
	 * (prefix_r && ra) is an invalid form. Should already be
	 * checked for by caller!
	 */

	return ea;
}

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
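 * For example, with 8-byte longs, max_align(6) == 2, max_align(0x1008) == 8
 * and max_align(0x1010) == 8.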
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}

static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;
		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
	case 32: {
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[3]);
		up[3] = tmp;
		tmp = byterev_8(up[2]);
		up[2] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}

#endif
	default:
		WARN_ON_ONCE(1);
	}
}

static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}

/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb, regs);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);

static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}

/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).
 */
static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}

static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;

	u.ul = val;
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	return copy_mem_out(&u.b[i], ea, nb, regs);
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.  N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb, regs);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);

#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
NOKPROBE_SYMBOL(do_fp_load);

static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	preempt_disable();
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
NOKPROBE_SYMBOL(do_fp_store);
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
				       int size, struct pt_regs *regs,
				       bool cross_endian)
{
	int err;
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u = {};

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);
	err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
	if (err)
		return err;
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	preempt_disable();
	if (regs->msr & MSR_VEC)
		put_vr(rn, &u.v);
	else
		current->thread.vr_state.vr[rn] = u.v;
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
					int size, struct pt_regs *regs,
					bool cross_endian)
{
	union {
		__vector128 v;
		u8 b[sizeof(__vector128)];
	} u;

	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	/* align to multiple of size */
	ea &= ~(size - 1);

	preempt_disable();
	if (regs->msr & MSR_VEC)
		get_vr(rn, &u.v);
	else
		u.v = current->thread.vr_state.vr[rn];
	preempt_enable();
	if (unlikely(cross_endian))
		do_byte_reverse(&u.b[ea & 0xf], size);
	return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */
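
/*
 * lq/stq emulation: when the EA is 16-byte aligned the access goes
 * through do_lq()/do_stq() in quad.S so it can stay a single (atomic)
 * quadword access; otherwise it is split into two 8-byte
 * read_mem()/write_mem() accesses.
 */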
#ifdef __powerpc64__
static nokprobe_inline int emulate_lq(struct pt_regs *regs, unsigned long ea,
				      int reg, bool cross_endian)
{
	int err;

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0) {
		err = do_lq(ea, &regs->gpr[reg]);
	} else {
		err = read_mem(&regs->gpr[reg + IS_LE], ea, 8, regs);
		if (!err)
			err = read_mem(&regs->gpr[reg + IS_BE], ea + 8, 8, regs);
	}
	if (!err && unlikely(cross_endian))
		do_byte_reverse(&regs->gpr[reg], 16);
	return err;
}

static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
				       int reg, bool cross_endian)
{
	int err;
	unsigned long vals[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	vals[0] = regs->gpr[reg];
	vals[1] = regs->gpr[reg + 1];
	if (unlikely(cross_endian))
		do_byte_reverse(vals, 16);

	/* if aligned, should be atomic */
	if ((ea & 0xf) == 0)
		return do_stq(ea, vals[0], vals[1]);

	err = write_mem(vals[IS_LE], ea, 8, regs);
	if (!err)
		err = write_mem(vals[IS_BE], ea + 8, 8, regs);
	return err;
}
#endif /* __powerpc64 */

#ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
		      const void *mem, bool rev)
{
	int size, read_size;
	int i, j;
	const unsigned int *wp;
	const unsigned short *hp;
	const unsigned char *bp;

	size = GETSIZE(op->type);
	reg->d[0] = reg->d[1] = 0;

	switch (op->element_size) {
	case 32:
		/* [p]lxvp[x] */
	case 16:
		/* whole vector; lxv[x] or lxvl[l] */
		if (size == 0)
			break;
		memcpy(reg, mem, size);
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev)
			do_byte_reverse(reg, size);
		break;
	case 8:
		/* scalar loads, lxvd2x, lxvdsx */
		read_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - read_size;
		memcpy(&reg->b[i], mem, read_size);
		if (rev)
			do_byte_reverse(&reg->b[i], 8);
		if (size < 8) {
			if (op->type & SIGNEXT) {
				/* size == 4 is the only case here */
				reg->d[IS_LE] = (signed int) reg->d[IS_LE];
			} else if (op->vsx_flags & VSX_FPCONV) {
				preempt_disable();
				conv_sp_to_dp(&reg->fp[1 + IS_LE],
					      &reg->dp[IS_LE]);
				preempt_enable();
			}
		} else {
			if (size == 16) {
				unsigned long v = *(unsigned long *)(mem + 8);
				reg->d[IS_BE] = !rev ? v : byterev_8(v);
			} else if (op->vsx_flags & VSX_SPLAT)
				reg->d[IS_BE] = reg->d[IS_LE];
		}
		break;
	case 4:
		/* lxvw4x, lxvwsx */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			reg->w[i] = !rev ? *wp++ : byterev_4(*wp++);
		}
		if (op->vsx_flags & VSX_SPLAT) {
			u32 val = reg->w[IS_LE ? 3 : 0];
			for (; j < 4; ++j) {
				i = IS_LE ? 3 - j : j;
				reg->w[i] = val;
			}
		}
		break;
	case 2:
		/* lxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			reg->h[i] = !rev ? *hp++ : byterev_2(*hp++);
		}
		break;
	case 1:
		/* lxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			reg->b[i] = *bp++;
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);

void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
		       void *mem, bool rev)
{
	int size, write_size;
	int i, j;
	union vsx_reg buf;
	unsigned int *wp;
	unsigned short *hp;
	unsigned char *bp;

	size = GETSIZE(op->type);

	switch (op->element_size) {
	case 32:
		/* [p]stxvp[x] */
		if (size == 0)
			break;
		if (rev) {
			/* reverse 32 bytes */
			union vsx_reg buf32[2];
			buf32[0].d[0] = byterev_8(reg[1].d[1]);
			buf32[0].d[1] = byterev_8(reg[1].d[0]);
			buf32[1].d[0] = byterev_8(reg[0].d[1]);
			buf32[1].d[1] = byterev_8(reg[0].d[0]);
			memcpy(mem, buf32, size);
		} else {
			memcpy(mem, reg, size);
		}
		break;
	case 16:
		/* stxv, stxvx, stxvl, stxvll */
		if (size == 0)
			break;
		if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
			rev = !rev;
		if (rev) {
			/* reverse 16 bytes */
			buf.d[0] = byterev_8(reg->d[1]);
			buf.d[1] = byterev_8(reg->d[0]);
			reg = &buf;
		}
		memcpy(mem, reg, size);
		break;
	case 8:
		/* scalar stores, stxvd2x */
		write_size = (size >= 8) ? 8 : size;
		i = IS_LE ? 8 : 8 - write_size;
		if (size < 8 && op->vsx_flags & VSX_FPCONV) {
			buf.d[0] = buf.d[1] = 0;
			preempt_disable();
			conv_dp_to_sp(&reg->dp[IS_LE], &buf.fp[1 + IS_LE]);
			preempt_enable();
			reg = &buf;
		}
		memcpy(mem, &reg->b[i], write_size);
		if (size == 16)
			memcpy(mem + 8, &reg->d[IS_BE], 8);
		if (unlikely(rev)) {
			do_byte_reverse(mem, write_size);
			if (size == 16)
				do_byte_reverse(mem + 8, 8);
		}
		break;
	case 4:
		/* stxvw4x */
		wp = mem;
		for (j = 0; j < size / 4; ++j) {
			i = IS_LE ? 3 - j : j;
			*wp++ = !rev ? reg->w[i] : byterev_4(reg->w[i]);
		}
		break;
	case 2:
		/* stxvh8x */
		hp = mem;
		for (j = 0; j < size / 2; ++j) {
			i = IS_LE ? 7 - j : j;
			*hp++ = !rev ? reg->h[i] : byterev_2(reg->h[i]);
		}
		break;
	case 1:
		/* stxvb16x */
		bp = mem;
		for (j = 0; j < size; ++j) {
			i = IS_LE ? 15 - j : j;
			*bp++ = reg->b[i];
		}
		break;
	}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
				       unsigned long ea, struct pt_regs *regs,
				       bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	emulate_vsx_load(op, buf, mem, cross_endian);
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
				current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				load_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
			}
		}
	}
	preempt_enable();
	return 0;
}

static nokprobe_inline int do_vsx_store(struct instruction_op *op,
					unsigned long ea, struct pt_regs *regs,
					bool cross_endian)
{
	int reg = op->reg;
	int i, j, nr_vsx_regs;
	u8 mem[32];
	union vsx_reg buf[2];
	int size = GETSIZE(op->type);

	if (!address_ok(regs, ea, size))
		return -EFAULT;

	nr_vsx_regs = max(1ul, size / sizeof(__vector128));
	preempt_disable();
	if (reg < 32) {
		/* FP regs + extensions */
		if (regs->msr & MSR_FP) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
				buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
			}
		}
	} else {
		if (regs->msr & MSR_VEC) {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				store_vsrn(reg + i, &buf[j].v);
			}
		} else {
			for (i = 0; i < nr_vsx_regs; i++) {
				j = IS_LE ? nr_vsx_regs - i - 1 : i;
				buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
			}
		}
	}
	preempt_enable();
	emulate_vsx_store(op, buf, mem, cross_endian);
	return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */

int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long i, size;

#ifdef __powerpc64__
	size = ppc64_caches.l1d.block_size;
	if (!(regs->msr & MSR_64BIT))
		ea &= 0xffffffffUL;
#else
	size = L1_CACHE_BYTES;
#endif
	ea &= ~(size - 1);
	if (!address_ok(regs, ea, size))
		return -EFAULT;
	for (i = 0; i < size; i += sizeof(long)) {
		err = __put_user(0, (unsigned long __user *) (ea + i));
		if (err) {
			regs->dar = ea;
			return err;
		}
	}
	return 0;
}
NOKPROBE_SYMBOL(emulate_dcbz);

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static nokprobe_inline void set_cr0(const struct pt_regs *regs,
				    struct instruction_op *op)
{
	long val = op->val;

	op->type |= SETCC;
	op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		op->ccval |= 0x80000000;
	else if (val > 0)
		op->ccval |= 0x40000000;
	else
		op->ccval |= 0x20000000;
}
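
/*
 * XER[CA32] (and OV32) were introduced in ISA v3.0 (POWER9); on older
 * CPUs (no CPU_FTR_ARCH_300) set_ca32() leaves XER unchanged.
 */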
static nokprobe_inline void set_ca32(struct instruction_op *op, bool val)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		if (val)
			op->xerval |= XER_CA32;
		else
			op->xerval &= ~XER_CA32;
	}
}

static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
					   struct instruction_op *op, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	op->type = COMPUTE + SETREG + SETXER;
	op->reg = rd;
	op->val = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	op->xerval = regs->xer;
	if (val < val1 || (carry_in && val == val1))
		op->xerval |= XER_CA;
	else
		op->xerval &= ~XER_CA;

	set_ca32(op, (unsigned int)val < (unsigned int)val1 ||
			(carry_in && (unsigned int)val == (unsigned int)val1));
}

static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
					  struct instruction_op *op,
					  long v1, long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
					    struct instruction_op *op,
					    unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	op->type = COMPUTE + SETCC;
	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	op->ccval = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmpb(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v1, unsigned long v2)
{
	unsigned long long out_val, mask;
	int i;

	out_val = 0;
	for (i = 0; i < 8; i++) {
		mask = 0xffUL << (i * 8);
		if ((v1 & mask) == (v2 & mask))
			out_val |= mask;
	}
	op->val = out_val;
}

/*
 * The size parameter is used to adjust the equivalent popcnt instruction.
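 * It gives the width in bits of each field whose set bits are counted: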
 * popcntb = 8, popcntw = 32, popcntd = 64
 */
static nokprobe_inline void do_popcnt(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, int size)
{
	unsigned long long out = v1;

	out -= (out >> 1) & 0x5555555555555555ULL;
	out = (0x3333333333333333ULL & out) +
	      (0x3333333333333333ULL & (out >> 2));
	out = (out + (out >> 4)) & 0x0f0f0f0f0f0f0f0fULL;

	if (size == 8) {	/* popcntb */
		op->val = out;
		return;
	}
	out += out >> 8;
	out += out >> 16;
	if (size == 32) {	/* popcntw */
		op->val = out & 0x0000003f0000003fULL;
		return;
	}

	out = (out + (out >> 32)) & 0x7f;
	op->val = out;	/* popcntd */
}

#ifdef CONFIG_PPC64
static nokprobe_inline void do_bpermd(const struct pt_regs *regs,
				      struct instruction_op *op,
				      unsigned long v1, unsigned long v2)
{
	unsigned char perm, idx;
	unsigned int i;

	perm = 0;
	for (i = 0; i < 8; i++) {
		idx = (v1 >> (i * 8)) & 0xff;
		if (idx < 64)
			if (v2 & PPC_BIT(idx))
				perm |= 1 << i;
	}
	op->val = perm;
}
#endif /* CONFIG_PPC64 */
/*
 * The size parameter adjusts the equivalent prty instruction.
 * prtyw = 32, prtyd = 64
 */
static nokprobe_inline void do_prty(const struct pt_regs *regs,
				    struct instruction_op *op,
				    unsigned long v, int size)
{
	unsigned long long res = v ^ (v >> 8);

	res ^= res >> 16;
	if (size == 32) {	/* prtyw */
		op->val = res & 0x0000000100000001ULL;
		return;
	}

	res ^= res >> 32;
	op->val = res & 1;	/* prtyd */
}

static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

/*
 * Decode an instruction, and return information about it in *op
 * without changing *regs.
 * Integer arithmetic and logical instructions, branches, and barrier
 * instructions can be emulated just using the information in *op.
 *
 * Return value is 1 if the instruction can be emulated just by
 * updating *regs with the information in *op, -1 if we need the
 * GPRs but *regs doesn't contain the full register set, or 0
 * otherwise.
 */
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
		  struct ppc_inst instr)
{
#ifdef CONFIG_PPC64
	unsigned int suffixopcode, prefixtype, prefix_r;
#endif
	unsigned int opcode, ra, rb, rc, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	unsigned int word, suffix;
	long ival;

	word = ppc_inst_val(instr);
	suffix = ppc_inst_suffix(instr);

	op->type = COMPUTE;

	opcode = ppc_inst_primary_opcode(instr);
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(word & 0xfffc);
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		if (branch_taken(word, regs, op))
			op->type |= BRTAKEN;
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((word & 0xfe2) == 2)
			op->type = SYSCALL;
		else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
				(word & 0xfe3) == 1) {	/* scv */
			op->type = SYSCALL_VECTORED_0;
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
		} else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH | BRTAKEN;
		imm = word & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((word & 2) == 0)
			imm += regs->nip;
		op->val = truncate_if_32bit(regs->msr, imm);
		if (word & 1)
			op->type |= SETLK;
		return 1;
	case 19:
		switch ((word >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			op->type = COMPUTE + SETCC;
			rd = 7 - ((word >> 23) & 0x7);
			ra = 7 - ((word >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			op->ccval = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			return 1;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (word & 0x400)? regs->ctr: regs->link;
			op->val = truncate_if_32bit(regs->msr, imm);
			if (word & 1)
				op->type |= SETLK;
			if (branch_taken(word, regs, op))
				op->type |= BRTAKEN;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER | BARRIER_ISYNC;
			return 1;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			op->type = COMPUTE + SETCC;
			ra = (word >> 16) & 0x1f;
			rb = (word >> 11) & 0x1f;
			rd = (word >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (word >> (6 + ra * 2 + rb)) & 1;
			op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			return 1;
		}
		break;
	case 31:
		switch ((word >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
			switch ((word >> 21) & 3) {
			case 1:		/* lwsync */
				op->type = BARRIER + BARRIER_LWSYNC;
				break;
			case 2:		/* ptesync */
				op->type = BARRIER + BARRIER_PTESYNC;
				break;
			}
#endif
			return 1;

		case 854:	/* eieio */
			op->type = BARRIER + BARRIER_EIEIO;
			return 1;
		}
		break;
	}

	rd = (word >> 21) & 0x1f;
	ra = (word >> 16) & 0x1f;
	rb = (word >> 11) & 0x1f;
	rc = (word >> 6) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 1:
		if (!cpu_has_feature(CPU_FTR_ARCH_31))
			goto unknown_opcode;

		prefix_r = GET_PREFIX_R(word);
		ra = GET_PREFIX_RA(suffix);
		rd = (suffix >> 21) & 0x1f;
		op->reg = rd;
		op->val = regs->gpr[rd];
		suffixopcode = get_op(suffix);
		prefixtype = (word >> 24) & 0x3;
		switch (prefixtype) {
		case 2:
			if (prefix_r && ra)
				return 0;
			switch (suffixopcode) {
			case 14:	/* paddi */
				op->type = COMPUTE | PREFIXED;
				op->val = mlsd_8lsd_ea(word, suffix, regs);
				goto compute_done;
			}
		}
		break;
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) word))
			goto trap;
		return 1;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) word))
			goto trap;
		return 1;

#ifdef __powerpc64__
	case 4:
		/*
		 * There are very many instructions with this primary opcode
		 * introduced in the ISA as early as v2.03. However, the ones
		 * we currently emulate were all introduced with ISA 3.0
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			goto unknown_opcode;

		switch (word & 0x3f) {
		case 48:	/* maddhd */
			asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 49:	/* maddhdu */
			asm volatile(PPC_MADDHDU(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;

		case 51:	/* maddld */
			asm volatile(PPC_MADDLD(%0, %1, %2, %3) :
				     "=r" (op->val) : "r" (regs->gpr[ra]),
				     "r" (regs->gpr[rb]), "r" (regs->gpr[rc]));
			goto compute_done;
		}

		/*
		 * There are other instructions from ISA 3.0 with the same
		 * primary opcode which do not have emulation support yet.
		 */
		goto unknown_opcode;
#endif

	case 7:		/* mulli */
		op->val = regs->gpr[ra] * (short) word;
		goto compute_done;

	case 8:		/* subfic */
		imm = (short) word;
		add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
		return 1;

	case 10:	/* cmpli */
		imm = (unsigned short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, op, val, imm, rd >> 2);
		return 1;

	case 11:	/* cmpi */
		imm = (short) word;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, op, val, imm, rd >> 2);
		return 1;

	case 12:	/* addic */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		return 1;

	case 13:	/* addic. */
		imm = (short) word;
		add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, op);
		return 1;

	case 14:	/* addi */
		imm = (short) word;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 15:	/* addis */
		imm = ((short) word) << 16;
		if (ra)
			imm += regs->gpr[ra];
		op->val = imm;
		goto compute_done;

	case 19:
		if (((word >> 1) & 0x1f) == 2) {
			/* addpcis */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			imm = (short) (word & 0xffc1);	/* d0 + d2 fields */
			imm |= (word >> 15) & 0x3e;	/* d1 field */
			op->val = regs->nip + (imm << 16) + 4;
			goto compute_done;
		}
		op->type = UNKNOWN;
		return 0;

	case 20:	/* rlwimi */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (word >> 6) & 0x1f;
		me = (word >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		op->val = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		op->val = regs->gpr[rd] | (unsigned short) word;
		goto logical_done_nocc;

	case 25:	/* oris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] | (imm << 16);
		goto logical_done_nocc;

	case 26:	/* xori */
		op->val = regs->gpr[rd] ^ (unsigned short) word;
		goto logical_done_nocc;

	case 27:	/* xoris */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] ^ (imm << 16);
		goto logical_done_nocc;

	case 28:	/* andi. */
		op->val = regs->gpr[rd] & (unsigned short) word;
		set_cr0(regs, op);
		goto logical_done_nocc;

	case 29:	/* andis. */
		imm = (unsigned short) word;
		op->val = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, op);
		goto logical_done_nocc;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((word >> 6) & 0x1f) | (word & 0x20);
		val = regs->gpr[rd];
		if ((word & 0x10) == 0) {
			sh = rb | ((word & 2) << 4);
			val = ROTATE(val, sh);
			switch ((word >> 2) & 3) {
			case 0:		/* rldicl */
				val &= MASK64_L(mb);
				break;
			case 1:		/* rldicr */
				val &= MASK64_R(mb);
				break;
			case 2:		/* rldic */
				val &= MASK64(mb, 63 - sh);
				break;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				val = (regs->gpr[ra] & ~imm) |
					(val & imm);
			}
			op->val = val;
			goto logical_done;
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((word >> 1) & 7) {
			case 0:		/* rldcl */
				op->val = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				op->val = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		op->type = UNKNOWN;	/* illegal instruction */
		return 0;

	case 31:
		/* isel occupies 32 minor opcodes */
		if (((word >> 1) & 0x1f) == 15) {
			mb = (word >> 6) & 0x1f; /* bc field */
			val = (regs->ccr >> (31 - mb)) & 1;
			val2 = (ra) ? regs->gpr[ra] : 0;

			op->val = (val) ? val2 : regs->gpr[rb];
			goto compute_done;
		}

		switch ((word >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			return 1;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			return 1;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			imm = 0xffffffffUL;
			if ((word >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (word & (0x80000 >> sh))
						break;
					imm >>= 4;
				}
			}
			op->val = regs->ccr & imm;
			goto compute_done;

		case 128:	/* setb */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			/*
			 * 'ra' encodes the CR field number (bfa) in the top 3 bits.
			 * Since each CR field is 4 bits,
			 * we can simply mask off the bottom two bits (bfa * 4)
			 * to yield the first bit in the CR field.
			 */
			ra = ra & ~0x3;
			/* 'val' stores bits of the CR field (bfa) */
			val = regs->ccr >> (CR0_SHIFT - ra);
			/* checks if the LT bit of CR field (bfa) is set */
			if (val & 8)
				op->val = -1;
			/* checks if the GT bit of CR field (bfa) is set */
			else if (val & 4)
				op->val = 1;
			else
				op->val = 0;
			goto compute_done;

		case 144:	/* mtcrf */
			op->type = COMPUTE + SETCC;
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			op->ccval = regs->ccr;
			for (sh = 0; sh < 8; ++sh) {
				if (word & (0x80000 >> sh))
					op->ccval = (op->ccval & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			return 1;

		case 339:	/* mfspr */
			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
			op->type = MFSPR;
			op->reg = rd;
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

		case 467:	/* mtspr */
			spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
			op->type = MTSPR;
			op->val = regs->gpr[rd];
			op->spr = spr;
			if (spr == SPRN_XER || spr == SPRN_LR ||
			    spr == SPRN_CTR)
				return 1;
			return 0;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, op, val, val2, rd >> 2);
			return 1;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, op, val, val2, rd >> 2);
			return 1;

		case 508: /* cmpb */
			do_cmpb(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			op->val = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (op->val) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			op->val = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, op, rd, ~regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, op, rd, regs->gpr[ra],
				       regs->gpr[rb], regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, op, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, op, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			op->val = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, op, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			op->val = (long)(int) regs->gpr[ra] *
				(int) regs->gpr[rb];

			goto arith_done;
#ifdef __powerpc64__
		case 265:	/* modud */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = regs->gpr[ra] % regs->gpr[rb];
			goto compute_done;
#endif
		case 266:	/* add */
			op->val = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;

		case 267:	/* moduw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (unsigned int) regs->gpr[ra] %
				(unsigned int) regs->gpr[rb];
			goto compute_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			op->val = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			op->val = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			op->val = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			op->val = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 425:	/* divde[.] */
			asm volatile(PPC_DIVDE(%0, %1, %2) :
				"=r" (op->val) : "r" (regs->gpr[ra]),
				"r" (regs->gpr[rb]));
			goto arith_done;
		case 393:	/* divdeu[.] */
			asm volatile(PPC_DIVDEU(%0, %1, %2) :
				"=r" (op->val) : "r" (regs->gpr[ra]),
				"r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 755:	/* darn */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			switch (ra & 0x3) {
			case 0:
				/* 32-bit conditioned */
				asm volatile(PPC_DARN(%0, 0) : "=r" (op->val));
				goto compute_done;

			case 1:
				/* 64-bit conditioned */
				asm volatile(PPC_DARN(%0, 1) : "=r" (op->val));
				goto compute_done;

			case 2:
				/* 64-bit raw */
				asm volatile(PPC_DARN(%0, 2) : "=r" (op->val));
				goto compute_done;
			}

			goto unknown_opcode;
#ifdef __powerpc64__
		case 777:	/* modsd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (long int) regs->gpr[ra] %
				(long int) regs->gpr[rb];
			goto compute_done;
#endif
		case 779:	/* modsw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->val = (int) regs->gpr[ra] %
				(int) regs->gpr[rb];
			goto compute_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			val = (unsigned int) regs->gpr[rd];
			op->val = ( val ? __builtin_clz(val) : 32 );
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			val = regs->gpr[rd];
			op->val = ( val ? __builtin_clzl(val) : 64 );
			goto logical_done;
#endif
		case 28:	/* and */
			op->val = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			op->val = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 122:	/* popcntb */
			do_popcnt(regs, op, regs->gpr[rd], 8);
			goto logical_done_nocc;

		case 124:	/* nor */
			op->val = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 154:	/* prtyw */
			do_prty(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 186:	/* prtyd */
			do_prty(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#ifdef CONFIG_PPC64
		case 252:	/* bpermd */
			do_bpermd(regs, op, regs->gpr[rd], regs->gpr[rb]);
			goto logical_done_nocc;
#endif
		case 284:	/* eqv */
			op->val = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			op->val = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 378:	/* popcntw */
			do_popcnt(regs, op, regs->gpr[rd], 32);
			goto logical_done_nocc;

		case 412:	/* orc */
			op->val = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			op->val = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			op->val = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;
#ifdef CONFIG_PPC64
		case 506:	/* popcntd */
			do_popcnt(regs, op, regs->gpr[rd], 64);
			goto logical_done_nocc;
#endif
		case 538:	/* cnttzw */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			val = (unsigned int) regs->gpr[rd];
			op->val = (val ? __builtin_ctz(val) : 32);
			goto logical_done;
#ifdef __powerpc64__
		case 570:	/* cnttzd */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			val = regs->gpr[rd];
			op->val = (val ? __builtin_ctzl(val) : 64);
			goto logical_done;
#endif
		case 922:	/* extsh */
			op->val = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			op->val = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			op->val = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				op->val = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 792:	/* sraw */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> (sh < 32 ? sh : 31);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 824:	/* srawi */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] << sh;
			else
				op->val = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				op->val = regs->gpr[rd] >> sh;
			else
				op->val = 0;
			goto logical_done;

		case 794:	/* srad */
			op->type = COMPUTE + SETREG + SETXER;
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> (sh < 64 ? sh : 63);
			op->xerval = regs->xer;
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			op->type = COMPUTE + SETREG + SETXER;
			sh = rb | ((word & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			op->val = ival >> sh;
			op->xerval = regs->xer;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				op->xerval |= XER_CA;
			else
				op->xerval &= ~XER_CA;
			set_ca32(op, op->xerval & XER_CA);
			goto logical_done;

		case 890:	/* extswsli with sh_5 = 0 */
		case 891:	/* extswsli with sh_5 = 1 */
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				goto unknown_opcode;
			op->type = COMPUTE + SETREG;
			sh = rb | ((word & 2) << 4);
			val = (signed int) regs->gpr[rd];
			if (sh)
				op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
			else
				op->val = val;
			goto logical_done;

#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(word, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(word, regs);
			return 0;

		case 1014:	/* dcbz */
			op->type = MKOP(CACHEOP, DCBZ, 0);
			op->ea = xform_ea(word, regs);
			return 0;
		}
		break;
	}

/*
 * Loads and stores.
 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (word >> 20) & UPDATE;
	op->vsx_flags = 0;

	switch (opcode) {
	case 31:
		u = word & UPDATE;
		op->ea = xform_ea(word, regs);
		switch ((word >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 52:	/* lbarx */
			op->type = MKOP(LARX, 0, 1);
			break;

		case 694:	/* stbcx. */
			op->type = MKOP(STCX, 0, 1);
			break;

		case 116:	/* lharx */
			op->type = MKOP(LARX, 0, 2);
			break;

		case 726:	/* sthcx. */
			op->type = MKOP(STCX, 0, 2);
			break;

		case 276:	/* lqarx */
			if (!((rd & 1) || rd == ra || rd == rb))
				op->type = MKOP(LARX, 0, 16);
			break;

		case 182:	/* stqcx. */
			if (!(rd & 1))
				op->type = MKOP(STCX, 0, 16);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		/*
		 * Note: for the load/store vector element instructions,
		 * bits of the EA say which field of the VMX register to use.
		 */
		case 7:		/* lvebx */
			op->type = MKOP(LOAD_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 39:	/* lvehx */
			op->type = MKOP(LOAD_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 71:	/* lvewx */
			op->type = MKOP(LOAD_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 103:	/* lvx */
		case 359:	/* lvxl */
			op->type = MKOP(LOAD_VMX, 0, 16);
			op->element_size = 16;
			break;

		case 135:	/* stvebx */
			op->type = MKOP(STORE_VMX, 0, 1);
			op->element_size = 1;
			break;

		case 167:	/* stvehx */
			op->type = MKOP(STORE_VMX, 0, 2);
			op->element_size = 2;
			break;

		case 199:	/* stvewx */
			op->type = MKOP(STORE_VMX, 0, 4);
			op->element_size = 4;
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;

		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = ra ? regs->gpr[ra] : 0;
regs->gpr[ra] : 0; 2373 break; 2374 2375 #ifdef CONFIG_PPC_FPU 2376 case 535: /* lfsx */ 2377 case 567: /* lfsux */ 2378 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2379 break; 2380 2381 case 599: /* lfdx */ 2382 case 631: /* lfdux */ 2383 op->type = MKOP(LOAD_FP, u, 8); 2384 break; 2385 2386 case 663: /* stfsx */ 2387 case 695: /* stfsux */ 2388 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2389 break; 2390 2391 case 727: /* stfdx */ 2392 case 759: /* stfdux */ 2393 op->type = MKOP(STORE_FP, u, 8); 2394 break; 2395 2396 #ifdef __powerpc64__ 2397 case 791: /* lfdpx */ 2398 op->type = MKOP(LOAD_FP, 0, 16); 2399 break; 2400 2401 case 855: /* lfiwax */ 2402 op->type = MKOP(LOAD_FP, SIGNEXT, 4); 2403 break; 2404 2405 case 887: /* lfiwzx */ 2406 op->type = MKOP(LOAD_FP, 0, 4); 2407 break; 2408 2409 case 919: /* stfdpx */ 2410 op->type = MKOP(STORE_FP, 0, 16); 2411 break; 2412 2413 case 983: /* stfiwx */ 2414 op->type = MKOP(STORE_FP, 0, 4); 2415 break; 2416 #endif /* __powerpc64 */ 2417 #endif /* CONFIG_PPC_FPU */ 2418 2419 #ifdef __powerpc64__ 2420 case 660: /* stdbrx */ 2421 op->type = MKOP(STORE, BYTEREV, 8); 2422 op->val = byterev_8(regs->gpr[rd]); 2423 break; 2424 2425 #endif 2426 case 661: /* stswx */ 2427 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f); 2428 break; 2429 2430 case 662: /* stwbrx */ 2431 op->type = MKOP(STORE, BYTEREV, 4); 2432 op->val = byterev_4(regs->gpr[rd]); 2433 break; 2434 2435 case 725: /* stswi */ 2436 if (rb == 0) 2437 rb = 32; /* # bytes to store */ 2438 op->type = MKOP(STORE_MULTI, 0, rb); 2439 op->ea = ra ? regs->gpr[ra] : 0; 2440 break; 2441 2442 case 790: /* lhbrx */ 2443 op->type = MKOP(LOAD, BYTEREV, 2); 2444 break; 2445 2446 case 918: /* sthbrx */ 2447 op->type = MKOP(STORE, BYTEREV, 2); 2448 op->val = byterev_2(regs->gpr[rd]); 2449 break; 2450 2451 #ifdef CONFIG_VSX 2452 case 12: /* lxsiwzx */ 2453 op->reg = rd | ((word & 1) << 5); 2454 op->type = MKOP(LOAD_VSX, 0, 4); 2455 op->element_size = 8; 2456 break; 2457 2458 case 76: /* lxsiwax */ 2459 op->reg = rd | ((word & 1) << 5); 2460 op->type = MKOP(LOAD_VSX, SIGNEXT, 4); 2461 op->element_size = 8; 2462 break; 2463 2464 case 140: /* stxsiwx */ 2465 op->reg = rd | ((word & 1) << 5); 2466 op->type = MKOP(STORE_VSX, 0, 4); 2467 op->element_size = 8; 2468 break; 2469 2470 case 268: /* lxvx */ 2471 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2472 goto unknown_opcode; 2473 op->reg = rd | ((word & 1) << 5); 2474 op->type = MKOP(LOAD_VSX, 0, 16); 2475 op->element_size = 16; 2476 op->vsx_flags = VSX_CHECK_VEC; 2477 break; 2478 2479 case 269: /* lxvl */ 2480 case 301: { /* lxvll */ 2481 int nb; 2482 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2483 goto unknown_opcode; 2484 op->reg = rd | ((word & 1) << 5); 2485 op->ea = ra ? regs->gpr[ra] : 0; 2486 nb = regs->gpr[rb] & 0xff; 2487 if (nb > 16) 2488 nb = 16; 2489 op->type = MKOP(LOAD_VSX, 0, nb); 2490 op->element_size = 16; 2491 op->vsx_flags = ((word & 0x20) ? 
VSX_LDLEFT : 0) | 2492 VSX_CHECK_VEC; 2493 break; 2494 } 2495 case 332: /* lxvdsx */ 2496 op->reg = rd | ((word & 1) << 5); 2497 op->type = MKOP(LOAD_VSX, 0, 8); 2498 op->element_size = 8; 2499 op->vsx_flags = VSX_SPLAT; 2500 break; 2501 2502 case 333: /* lxvpx */ 2503 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2504 goto unknown_opcode; 2505 op->reg = VSX_REGISTER_XTP(rd); 2506 op->type = MKOP(LOAD_VSX, 0, 32); 2507 op->element_size = 32; 2508 break; 2509 2510 case 364: /* lxvwsx */ 2511 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2512 goto unknown_opcode; 2513 op->reg = rd | ((word & 1) << 5); 2514 op->type = MKOP(LOAD_VSX, 0, 4); 2515 op->element_size = 4; 2516 op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC; 2517 break; 2518 2519 case 396: /* stxvx */ 2520 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2521 goto unknown_opcode; 2522 op->reg = rd | ((word & 1) << 5); 2523 op->type = MKOP(STORE_VSX, 0, 16); 2524 op->element_size = 16; 2525 op->vsx_flags = VSX_CHECK_VEC; 2526 break; 2527 2528 case 397: /* stxvl */ 2529 case 429: { /* stxvll */ 2530 int nb; 2531 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2532 goto unknown_opcode; 2533 op->reg = rd | ((word & 1) << 5); 2534 op->ea = ra ? regs->gpr[ra] : 0; 2535 nb = regs->gpr[rb] & 0xff; 2536 if (nb > 16) 2537 nb = 16; 2538 op->type = MKOP(STORE_VSX, 0, nb); 2539 op->element_size = 16; 2540 op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) | 2541 VSX_CHECK_VEC; 2542 break; 2543 } 2544 case 461: /* stxvpx */ 2545 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2546 goto unknown_opcode; 2547 op->reg = VSX_REGISTER_XTP(rd); 2548 op->type = MKOP(STORE_VSX, 0, 32); 2549 op->element_size = 32; 2550 break; 2551 case 524: /* lxsspx */ 2552 op->reg = rd | ((word & 1) << 5); 2553 op->type = MKOP(LOAD_VSX, 0, 4); 2554 op->element_size = 8; 2555 op->vsx_flags = VSX_FPCONV; 2556 break; 2557 2558 case 588: /* lxsdx */ 2559 op->reg = rd | ((word & 1) << 5); 2560 op->type = MKOP(LOAD_VSX, 0, 8); 2561 op->element_size = 8; 2562 break; 2563 2564 case 652: /* stxsspx */ 2565 op->reg = rd | ((word & 1) << 5); 2566 op->type = MKOP(STORE_VSX, 0, 4); 2567 op->element_size = 8; 2568 op->vsx_flags = VSX_FPCONV; 2569 break; 2570 2571 case 716: /* stxsdx */ 2572 op->reg = rd | ((word & 1) << 5); 2573 op->type = MKOP(STORE_VSX, 0, 8); 2574 op->element_size = 8; 2575 break; 2576 2577 case 780: /* lxvw4x */ 2578 op->reg = rd | ((word & 1) << 5); 2579 op->type = MKOP(LOAD_VSX, 0, 16); 2580 op->element_size = 4; 2581 break; 2582 2583 case 781: /* lxsibzx */ 2584 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2585 goto unknown_opcode; 2586 op->reg = rd | ((word & 1) << 5); 2587 op->type = MKOP(LOAD_VSX, 0, 1); 2588 op->element_size = 8; 2589 op->vsx_flags = VSX_CHECK_VEC; 2590 break; 2591 2592 case 812: /* lxvh8x */ 2593 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2594 goto unknown_opcode; 2595 op->reg = rd | ((word & 1) << 5); 2596 op->type = MKOP(LOAD_VSX, 0, 16); 2597 op->element_size = 2; 2598 op->vsx_flags = VSX_CHECK_VEC; 2599 break; 2600 2601 case 813: /* lxsihzx */ 2602 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2603 goto unknown_opcode; 2604 op->reg = rd | ((word & 1) << 5); 2605 op->type = MKOP(LOAD_VSX, 0, 2); 2606 op->element_size = 8; 2607 op->vsx_flags = VSX_CHECK_VEC; 2608 break; 2609 2610 case 844: /* lxvd2x */ 2611 op->reg = rd | ((word & 1) << 5); 2612 op->type = MKOP(LOAD_VSX, 0, 16); 2613 op->element_size = 8; 2614 break; 2615 2616 case 876: /* lxvb16x */ 2617 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2618 goto unknown_opcode; 2619 op->reg = rd | ((word & 1) << 5); 2620 op->type = MKOP(LOAD_VSX, 0, 16); 
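			/*
			 * element_size records the width of each element so the VSX
			 * load/store helpers know the granularity at which to apply
			 * cross-endian fix-ups: lxvb16x is defined in terms of 16
			 * individual bytes, hence element_size = 1 below (compare
			 * lxvd2x above, which uses 8-byte elements).
			 */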
2621 op->element_size = 1; 2622 op->vsx_flags = VSX_CHECK_VEC; 2623 break; 2624 2625 case 908: /* stxvw4x */ 2626 op->reg = rd | ((word & 1) << 5); 2627 op->type = MKOP(STORE_VSX, 0, 16); 2628 op->element_size = 4; 2629 break; 2630 2631 case 909: /* stxsibx */ 2632 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2633 goto unknown_opcode; 2634 op->reg = rd | ((word & 1) << 5); 2635 op->type = MKOP(STORE_VSX, 0, 1); 2636 op->element_size = 8; 2637 op->vsx_flags = VSX_CHECK_VEC; 2638 break; 2639 2640 case 940: /* stxvh8x */ 2641 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2642 goto unknown_opcode; 2643 op->reg = rd | ((word & 1) << 5); 2644 op->type = MKOP(STORE_VSX, 0, 16); 2645 op->element_size = 2; 2646 op->vsx_flags = VSX_CHECK_VEC; 2647 break; 2648 2649 case 941: /* stxsihx */ 2650 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2651 goto unknown_opcode; 2652 op->reg = rd | ((word & 1) << 5); 2653 op->type = MKOP(STORE_VSX, 0, 2); 2654 op->element_size = 8; 2655 op->vsx_flags = VSX_CHECK_VEC; 2656 break; 2657 2658 case 972: /* stxvd2x */ 2659 op->reg = rd | ((word & 1) << 5); 2660 op->type = MKOP(STORE_VSX, 0, 16); 2661 op->element_size = 8; 2662 break; 2663 2664 case 1004: /* stxvb16x */ 2665 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2666 goto unknown_opcode; 2667 op->reg = rd | ((word & 1) << 5); 2668 op->type = MKOP(STORE_VSX, 0, 16); 2669 op->element_size = 1; 2670 op->vsx_flags = VSX_CHECK_VEC; 2671 break; 2672 2673 #endif /* CONFIG_VSX */ 2674 } 2675 break; 2676 2677 case 32: /* lwz */ 2678 case 33: /* lwzu */ 2679 op->type = MKOP(LOAD, u, 4); 2680 op->ea = dform_ea(word, regs); 2681 break; 2682 2683 case 34: /* lbz */ 2684 case 35: /* lbzu */ 2685 op->type = MKOP(LOAD, u, 1); 2686 op->ea = dform_ea(word, regs); 2687 break; 2688 2689 case 36: /* stw */ 2690 case 37: /* stwu */ 2691 op->type = MKOP(STORE, u, 4); 2692 op->ea = dform_ea(word, regs); 2693 break; 2694 2695 case 38: /* stb */ 2696 case 39: /* stbu */ 2697 op->type = MKOP(STORE, u, 1); 2698 op->ea = dform_ea(word, regs); 2699 break; 2700 2701 case 40: /* lhz */ 2702 case 41: /* lhzu */ 2703 op->type = MKOP(LOAD, u, 2); 2704 op->ea = dform_ea(word, regs); 2705 break; 2706 2707 case 42: /* lha */ 2708 case 43: /* lhau */ 2709 op->type = MKOP(LOAD, SIGNEXT | u, 2); 2710 op->ea = dform_ea(word, regs); 2711 break; 2712 2713 case 44: /* sth */ 2714 case 45: /* sthu */ 2715 op->type = MKOP(STORE, u, 2); 2716 op->ea = dform_ea(word, regs); 2717 break; 2718 2719 case 46: /* lmw */ 2720 if (ra >= rd) 2721 break; /* invalid form, ra in range to load */ 2722 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd)); 2723 op->ea = dform_ea(word, regs); 2724 break; 2725 2726 case 47: /* stmw */ 2727 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd)); 2728 op->ea = dform_ea(word, regs); 2729 break; 2730 2731 #ifdef CONFIG_PPC_FPU 2732 case 48: /* lfs */ 2733 case 49: /* lfsu */ 2734 op->type = MKOP(LOAD_FP, u | FPCONV, 4); 2735 op->ea = dform_ea(word, regs); 2736 break; 2737 2738 case 50: /* lfd */ 2739 case 51: /* lfdu */ 2740 op->type = MKOP(LOAD_FP, u, 8); 2741 op->ea = dform_ea(word, regs); 2742 break; 2743 2744 case 52: /* stfs */ 2745 case 53: /* stfsu */ 2746 op->type = MKOP(STORE_FP, u | FPCONV, 4); 2747 op->ea = dform_ea(word, regs); 2748 break; 2749 2750 case 54: /* stfd */ 2751 case 55: /* stfdu */ 2752 op->type = MKOP(STORE_FP, u, 8); 2753 op->ea = dform_ea(word, regs); 2754 break; 2755 #endif 2756 2757 #ifdef __powerpc64__ 2758 case 56: /* lq */ 2759 if (!((rd & 1) || (rd == ra))) 2760 op->type = MKOP(LOAD, 0, 16); 2761 op->ea = dqform_ea(word, regs); 2762 
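		/*
		 * For example, lq r6,0x1230(r3): dqform_ea() sign-extends the low
		 * 16 bits of the word with the bottom four bits cleared, giving
		 * ea = gpr[3] + 0x1230.  The load is only emulated when rd is even
		 * and rd != ra; otherwise the form is invalid and op->type stays
		 * UNKNOWN.
		 */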
break; 2763 #endif 2764 2765 #ifdef CONFIG_VSX 2766 case 57: /* lfdp, lxsd, lxssp */ 2767 op->ea = dsform_ea(word, regs); 2768 switch (word & 3) { 2769 case 0: /* lfdp */ 2770 if (rd & 1) 2771 break; /* reg must be even */ 2772 op->type = MKOP(LOAD_FP, 0, 16); 2773 break; 2774 case 2: /* lxsd */ 2775 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2776 goto unknown_opcode; 2777 op->reg = rd + 32; 2778 op->type = MKOP(LOAD_VSX, 0, 8); 2779 op->element_size = 8; 2780 op->vsx_flags = VSX_CHECK_VEC; 2781 break; 2782 case 3: /* lxssp */ 2783 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2784 goto unknown_opcode; 2785 op->reg = rd + 32; 2786 op->type = MKOP(LOAD_VSX, 0, 4); 2787 op->element_size = 8; 2788 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2789 break; 2790 } 2791 break; 2792 #endif /* CONFIG_VSX */ 2793 2794 #ifdef __powerpc64__ 2795 case 58: /* ld[u], lwa */ 2796 op->ea = dsform_ea(word, regs); 2797 switch (word & 3) { 2798 case 0: /* ld */ 2799 op->type = MKOP(LOAD, 0, 8); 2800 break; 2801 case 1: /* ldu */ 2802 op->type = MKOP(LOAD, UPDATE, 8); 2803 break; 2804 case 2: /* lwa */ 2805 op->type = MKOP(LOAD, SIGNEXT, 4); 2806 break; 2807 } 2808 break; 2809 #endif 2810 2811 #ifdef CONFIG_VSX 2812 case 6: 2813 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2814 goto unknown_opcode; 2815 op->ea = dqform_ea(word, regs); 2816 op->reg = VSX_REGISTER_XTP(rd); 2817 op->element_size = 32; 2818 switch (word & 0xf) { 2819 case 0: /* lxvp */ 2820 op->type = MKOP(LOAD_VSX, 0, 32); 2821 break; 2822 case 1: /* stxvp */ 2823 op->type = MKOP(STORE_VSX, 0, 32); 2824 break; 2825 } 2826 break; 2827 2828 case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ 2829 switch (word & 7) { 2830 case 0: /* stfdp with LSB of DS field = 0 */ 2831 case 4: /* stfdp with LSB of DS field = 1 */ 2832 op->ea = dsform_ea(word, regs); 2833 op->type = MKOP(STORE_FP, 0, 16); 2834 break; 2835 2836 case 1: /* lxv */ 2837 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2838 goto unknown_opcode; 2839 op->ea = dqform_ea(word, regs); 2840 if (word & 8) 2841 op->reg = rd + 32; 2842 op->type = MKOP(LOAD_VSX, 0, 16); 2843 op->element_size = 16; 2844 op->vsx_flags = VSX_CHECK_VEC; 2845 break; 2846 2847 case 2: /* stxsd with LSB of DS field = 0 */ 2848 case 6: /* stxsd with LSB of DS field = 1 */ 2849 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2850 goto unknown_opcode; 2851 op->ea = dsform_ea(word, regs); 2852 op->reg = rd + 32; 2853 op->type = MKOP(STORE_VSX, 0, 8); 2854 op->element_size = 8; 2855 op->vsx_flags = VSX_CHECK_VEC; 2856 break; 2857 2858 case 3: /* stxssp with LSB of DS field = 0 */ 2859 case 7: /* stxssp with LSB of DS field = 1 */ 2860 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2861 goto unknown_opcode; 2862 op->ea = dsform_ea(word, regs); 2863 op->reg = rd + 32; 2864 op->type = MKOP(STORE_VSX, 0, 4); 2865 op->element_size = 8; 2866 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2867 break; 2868 2869 case 5: /* stxv */ 2870 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 2871 goto unknown_opcode; 2872 op->ea = dqform_ea(word, regs); 2873 if (word & 8) 2874 op->reg = rd + 32; 2875 op->type = MKOP(STORE_VSX, 0, 16); 2876 op->element_size = 16; 2877 op->vsx_flags = VSX_CHECK_VEC; 2878 break; 2879 } 2880 break; 2881 #endif /* CONFIG_VSX */ 2882 2883 #ifdef __powerpc64__ 2884 case 62: /* std[u] */ 2885 op->ea = dsform_ea(word, regs); 2886 switch (word & 3) { 2887 case 0: /* std */ 2888 op->type = MKOP(STORE, 0, 8); 2889 break; 2890 case 1: /* stdu */ 2891 op->type = MKOP(STORE, UPDATE, 8); 2892 break; 2893 case 2: /* stq */ 2894 if (!(rd & 1)) 2895 op->type = MKOP(STORE, 0, 16); 
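			/*
			 * stq stores the even/odd pair gpr[rd], gpr[rd + 1] as one
			 * 16-byte quantity, so an odd rd is an invalid form and
			 * op->type is left as UNKNOWN.
			 */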
2896 break; 2897 } 2898 break; 2899 case 1: /* Prefixed instructions */ 2900 if (!cpu_has_feature(CPU_FTR_ARCH_31)) 2901 goto unknown_opcode; 2902 2903 prefix_r = GET_PREFIX_R(word); 2904 ra = GET_PREFIX_RA(suffix); 2905 op->update_reg = ra; 2906 rd = (suffix >> 21) & 0x1f; 2907 op->reg = rd; 2908 op->val = regs->gpr[rd]; 2909 2910 suffixopcode = get_op(suffix); 2911 prefixtype = (word >> 24) & 0x3; 2912 switch (prefixtype) { 2913 case 0: /* Type 00 Eight-Byte Load/Store */ 2914 if (prefix_r && ra) 2915 break; 2916 op->ea = mlsd_8lsd_ea(word, suffix, regs); 2917 switch (suffixopcode) { 2918 case 41: /* plwa */ 2919 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); 2920 break; 2921 #ifdef CONFIG_VSX 2922 case 42: /* plxsd */ 2923 op->reg = rd + 32; 2924 op->type = MKOP(LOAD_VSX, PREFIXED, 8); 2925 op->element_size = 8; 2926 op->vsx_flags = VSX_CHECK_VEC; 2927 break; 2928 case 43: /* plxssp */ 2929 op->reg = rd + 32; 2930 op->type = MKOP(LOAD_VSX, PREFIXED, 4); 2931 op->element_size = 8; 2932 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2933 break; 2934 case 46: /* pstxsd */ 2935 op->reg = rd + 32; 2936 op->type = MKOP(STORE_VSX, PREFIXED, 8); 2937 op->element_size = 8; 2938 op->vsx_flags = VSX_CHECK_VEC; 2939 break; 2940 case 47: /* pstxssp */ 2941 op->reg = rd + 32; 2942 op->type = MKOP(STORE_VSX, PREFIXED, 4); 2943 op->element_size = 8; 2944 op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC; 2945 break; 2946 case 51: /* plxv1 */ 2947 op->reg += 32; 2948 fallthrough; 2949 case 50: /* plxv0 */ 2950 op->type = MKOP(LOAD_VSX, PREFIXED, 16); 2951 op->element_size = 16; 2952 op->vsx_flags = VSX_CHECK_VEC; 2953 break; 2954 case 55: /* pstxv1 */ 2955 op->reg = rd + 32; 2956 fallthrough; 2957 case 54: /* pstxv0 */ 2958 op->type = MKOP(STORE_VSX, PREFIXED, 16); 2959 op->element_size = 16; 2960 op->vsx_flags = VSX_CHECK_VEC; 2961 break; 2962 #endif /* CONFIG_VSX */ 2963 case 56: /* plq */ 2964 op->type = MKOP(LOAD, PREFIXED, 16); 2965 break; 2966 case 57: /* pld */ 2967 op->type = MKOP(LOAD, PREFIXED, 8); 2968 break; 2969 #ifdef CONFIG_VSX 2970 case 58: /* plxvp */ 2971 op->reg = VSX_REGISTER_XTP(rd); 2972 op->type = MKOP(LOAD_VSX, PREFIXED, 32); 2973 op->element_size = 32; 2974 break; 2975 #endif /* CONFIG_VSX */ 2976 case 60: /* pstq */ 2977 op->type = MKOP(STORE, PREFIXED, 16); 2978 break; 2979 case 61: /* pstd */ 2980 op->type = MKOP(STORE, PREFIXED, 8); 2981 break; 2982 #ifdef CONFIG_VSX 2983 case 62: /* pstxvp */ 2984 op->reg = VSX_REGISTER_XTP(rd); 2985 op->type = MKOP(STORE_VSX, PREFIXED, 32); 2986 op->element_size = 32; 2987 break; 2988 #endif /* CONFIG_VSX */ 2989 } 2990 break; 2991 case 1: /* Type 01 Eight-Byte Register-to-Register */ 2992 break; 2993 case 2: /* Type 10 Modified Load/Store */ 2994 if (prefix_r && ra) 2995 break; 2996 op->ea = mlsd_8lsd_ea(word, suffix, regs); 2997 switch (suffixopcode) { 2998 case 32: /* plwz */ 2999 op->type = MKOP(LOAD, PREFIXED, 4); 3000 break; 3001 case 34: /* plbz */ 3002 op->type = MKOP(LOAD, PREFIXED, 1); 3003 break; 3004 case 36: /* pstw */ 3005 op->type = MKOP(STORE, PREFIXED, 4); 3006 break; 3007 case 38: /* pstb */ 3008 op->type = MKOP(STORE, PREFIXED, 1); 3009 break; 3010 case 40: /* plhz */ 3011 op->type = MKOP(LOAD, PREFIXED, 2); 3012 break; 3013 case 42: /* plha */ 3014 op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2); 3015 break; 3016 case 44: /* psth */ 3017 op->type = MKOP(STORE, PREFIXED, 2); 3018 break; 3019 case 48: /* plfs */ 3020 op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4); 3021 break; 3022 case 50: /* plfd */ 3023 op->type = MKOP(LOAD_FP, 
PREFIXED, 8); 3024 break; 3025 case 52: /* pstfs */ 3026 op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4); 3027 break; 3028 case 54: /* pstfd */ 3029 op->type = MKOP(STORE_FP, PREFIXED, 8); 3030 break; 3031 } 3032 break; 3033 case 3: /* Type 11 Modified Register-to-Register */ 3034 break; 3035 } 3036 #endif /* __powerpc64__ */ 3037 3038 } 3039 3040 if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { 3041 switch (GETTYPE(op->type)) { 3042 case LOAD: 3043 if (ra == rd) 3044 goto unknown_opcode; 3045 fallthrough; 3046 case STORE: 3047 case LOAD_FP: 3048 case STORE_FP: 3049 if (ra == 0) 3050 goto unknown_opcode; 3051 } 3052 } 3053 3054 #ifdef CONFIG_VSX 3055 if ((GETTYPE(op->type) == LOAD_VSX || 3056 GETTYPE(op->type) == STORE_VSX) && 3057 !cpu_has_feature(CPU_FTR_VSX)) { 3058 return -1; 3059 } 3060 #endif /* CONFIG_VSX */ 3061 3062 return 0; 3063 3064 unknown_opcode: 3065 op->type = UNKNOWN; 3066 return 0; 3067 3068 logical_done: 3069 if (word & 1) 3070 set_cr0(regs, op); 3071 logical_done_nocc: 3072 op->reg = ra; 3073 op->type |= SETREG; 3074 return 1; 3075 3076 arith_done: 3077 if (word & 1) 3078 set_cr0(regs, op); 3079 compute_done: 3080 op->reg = rd; 3081 op->type |= SETREG; 3082 return 1; 3083 3084 priv: 3085 op->type = INTERRUPT | 0x700; 3086 op->val = SRR1_PROGPRIV; 3087 return 0; 3088 3089 trap: 3090 op->type = INTERRUPT | 0x700; 3091 op->val = SRR1_PROGTRAP; 3092 return 0; 3093 } 3094 EXPORT_SYMBOL_GPL(analyse_instr); 3095 NOKPROBE_SYMBOL(analyse_instr); 3096 3097 /* 3098 * For PPC32 we always use stwu with r1 to change the stack pointer. 3099 * So this emulated store may corrupt the exception frame, now we 3100 * have to provide the exception frame trampoline, which is pushed 3101 * below the kprobed function stack. So we only update gpr[1] but 3102 * don't emulate the real store operation. We will do real store 3103 * operation safely in exception return code by checking this flag. 3104 */ 3105 static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) 3106 { 3107 /* 3108 * Check if we already set since that means we'll 3109 * lose the previous value. 3110 */ 3111 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE)); 3112 set_thread_flag(TIF_EMULATE_STACK_STORE); 3113 return 0; 3114 } 3115 3116 static nokprobe_inline void do_signext(unsigned long *valp, int size) 3117 { 3118 switch (size) { 3119 case 2: 3120 *valp = (signed short) *valp; 3121 break; 3122 case 4: 3123 *valp = (signed int) *valp; 3124 break; 3125 } 3126 } 3127 3128 static nokprobe_inline void do_byterev(unsigned long *valp, int size) 3129 { 3130 switch (size) { 3131 case 2: 3132 *valp = byterev_2(*valp); 3133 break; 3134 case 4: 3135 *valp = byterev_4(*valp); 3136 break; 3137 #ifdef __powerpc64__ 3138 case 8: 3139 *valp = byterev_8(*valp); 3140 break; 3141 #endif 3142 } 3143 } 3144 3145 /* 3146 * Emulate an instruction that can be executed just by updating 3147 * fields in *regs. 
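 *
 * This is the path taken when analyse_instr() returns 1: arithmetic and
 * logical ops (COMPUTE), branches, barriers, and mfspr/mtspr of XER, LR
 * and CTR.  For example, emulating "addi r3,r3,1" only requires gpr[3]
 * and the NIP to be updated; no memory access is needed.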
3148 */ 3149 void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) 3150 { 3151 unsigned long next_pc; 3152 3153 next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type)); 3154 switch (GETTYPE(op->type)) { 3155 case COMPUTE: 3156 if (op->type & SETREG) 3157 regs->gpr[op->reg] = op->val; 3158 if (op->type & SETCC) 3159 regs->ccr = op->ccval; 3160 if (op->type & SETXER) 3161 regs->xer = op->xerval; 3162 break; 3163 3164 case BRANCH: 3165 if (op->type & SETLK) 3166 regs->link = next_pc; 3167 if (op->type & BRTAKEN) 3168 next_pc = op->val; 3169 if (op->type & DECCTR) 3170 --regs->ctr; 3171 break; 3172 3173 case BARRIER: 3174 switch (op->type & BARRIER_MASK) { 3175 case BARRIER_SYNC: 3176 mb(); 3177 break; 3178 case BARRIER_ISYNC: 3179 isync(); 3180 break; 3181 case BARRIER_EIEIO: 3182 eieio(); 3183 break; 3184 case BARRIER_LWSYNC: 3185 asm volatile("lwsync" : : : "memory"); 3186 break; 3187 case BARRIER_PTESYNC: 3188 asm volatile("ptesync" : : : "memory"); 3189 break; 3190 } 3191 break; 3192 3193 case MFSPR: 3194 switch (op->spr) { 3195 case SPRN_XER: 3196 regs->gpr[op->reg] = regs->xer & 0xffffffffUL; 3197 break; 3198 case SPRN_LR: 3199 regs->gpr[op->reg] = regs->link; 3200 break; 3201 case SPRN_CTR: 3202 regs->gpr[op->reg] = regs->ctr; 3203 break; 3204 default: 3205 WARN_ON_ONCE(1); 3206 } 3207 break; 3208 3209 case MTSPR: 3210 switch (op->spr) { 3211 case SPRN_XER: 3212 regs->xer = op->val & 0xffffffffUL; 3213 break; 3214 case SPRN_LR: 3215 regs->link = op->val; 3216 break; 3217 case SPRN_CTR: 3218 regs->ctr = op->val; 3219 break; 3220 default: 3221 WARN_ON_ONCE(1); 3222 } 3223 break; 3224 3225 default: 3226 WARN_ON_ONCE(1); 3227 } 3228 regs_set_return_ip(regs, next_pc); 3229 } 3230 NOKPROBE_SYMBOL(emulate_update_regs); 3231 3232 /* 3233 * Emulate a previously-analysed load or store instruction. 
3234  * Return values are:
3235  * 0 = instruction emulated successfully
3236  * -EFAULT = address out of range or access faulted (regs->dar
3237  *	     contains the faulting address)
3238  * -EACCES = misaligned access, instruction requires alignment
3239  * -EINVAL = unknown operation in *op
3240  */
3241 int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
3242 {
3243	int err, size, type;
3244	int i, rd, nb;
3245	unsigned int cr;
3246	unsigned long val;
3247	unsigned long ea;
3248	bool cross_endian;
3249
3250	err = 0;
3251	size = GETSIZE(op->type);
3252	type = GETTYPE(op->type);
3253	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
3254	ea = truncate_if_32bit(regs->msr, op->ea);
3255
3256	switch (type) {
3257	case LARX:
3258		if (ea & (size - 1))
3259			return -EACCES;		/* can't handle misaligned */
3260		if (!address_ok(regs, ea, size))
3261			return -EFAULT;
3262		err = 0;
3263		val = 0;
3264		switch (size) {
3265 #ifdef __powerpc64__
3266		case 1:
3267			__get_user_asmx(val, ea, err, "lbarx");
3268			break;
3269		case 2:
3270			__get_user_asmx(val, ea, err, "lharx");
3271			break;
3272 #endif
3273		case 4:
3274			__get_user_asmx(val, ea, err, "lwarx");
3275			break;
3276 #ifdef __powerpc64__
3277		case 8:
3278			__get_user_asmx(val, ea, err, "ldarx");
3279			break;
3280		case 16:
3281			err = do_lqarx(ea, &regs->gpr[op->reg]);
3282			break;
3283 #endif
3284		default:
3285			return -EINVAL;
3286		}
3287		if (err) {
3288			regs->dar = ea;
3289			break;
3290		}
3291		if (size < 16)
3292			regs->gpr[op->reg] = val;
3293		break;
3294
3295	case STCX:
3296		if (ea & (size - 1))
3297			return -EACCES;		/* can't handle misaligned */
3298		if (!address_ok(regs, ea, size))
3299			return -EFAULT;
3300		err = 0;
3301		switch (size) {
3302 #ifdef __powerpc64__
3303		case 1:
3304			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
3305			break;
3306		case 2:
3307			__put_user_asmx(op->val, ea, err, "sthcx.", cr);
3308			break;
3309 #endif
3310		case 4:
3311			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
3312			break;
3313 #ifdef __powerpc64__
3314		case 8:
3315			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
3316			break;
3317		case 16:
3318			err = do_stqcx(ea, regs->gpr[op->reg],
3319				       regs->gpr[op->reg + 1], &cr);
3320			break;
3321 #endif
3322		default:
3323			return -EINVAL;
3324		}
3325		if (!err)
3326			regs->ccr = (regs->ccr & 0x0fffffff) |
3327				(cr & 0xe0000000) |
3328				((regs->xer >> 3) & 0x10000000);
3329		else
3330			regs->dar = ea;
3331		break;
3332
3333	case LOAD:
3334 #ifdef __powerpc64__
3335		if (size == 16) {
3336			err = emulate_lq(regs, ea, op->reg, cross_endian);
3337			break;
3338		}
3339 #endif
3340		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
3341		if (!err) {
3342			if (op->type & SIGNEXT)
3343				do_signext(&regs->gpr[op->reg], size);
3344			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
3345				do_byterev(&regs->gpr[op->reg], size);
3346		}
3347		break;
3348
3349 #ifdef CONFIG_PPC_FPU
3350	case LOAD_FP:
3351		/*
3352		 * If the instruction is in userspace, we can emulate it even
3353		 * if the VMX state is not live, because we have the state
3354		 * stored in the thread_struct.  If the instruction is in
3355		 * the kernel, we must not touch the state in the thread_struct.
3356 */ 3357 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3358 return 0; 3359 err = do_fp_load(op, ea, regs, cross_endian); 3360 break; 3361 #endif 3362 #ifdef CONFIG_ALTIVEC 3363 case LOAD_VMX: 3364 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3365 return 0; 3366 err = do_vec_load(op->reg, ea, size, regs, cross_endian); 3367 break; 3368 #endif 3369 #ifdef CONFIG_VSX 3370 case LOAD_VSX: { 3371 unsigned long msrbit = MSR_VSX; 3372 3373 /* 3374 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3375 * when the target of the instruction is a vector register. 3376 */ 3377 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3378 msrbit = MSR_VEC; 3379 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3380 return 0; 3381 err = do_vsx_load(op, ea, regs, cross_endian); 3382 break; 3383 } 3384 #endif 3385 case LOAD_MULTI: 3386 if (!address_ok(regs, ea, size)) 3387 return -EFAULT; 3388 rd = op->reg; 3389 for (i = 0; i < size; i += 4) { 3390 unsigned int v32 = 0; 3391 3392 nb = size - i; 3393 if (nb > 4) 3394 nb = 4; 3395 err = copy_mem_in((u8 *) &v32, ea, nb, regs); 3396 if (err) 3397 break; 3398 if (unlikely(cross_endian)) 3399 v32 = byterev_4(v32); 3400 regs->gpr[rd] = v32; 3401 ea += 4; 3402 /* reg number wraps from 31 to 0 for lsw[ix] */ 3403 rd = (rd + 1) & 0x1f; 3404 } 3405 break; 3406 3407 case STORE: 3408 #ifdef __powerpc64__ 3409 if (size == 16) { 3410 err = emulate_stq(regs, ea, op->reg, cross_endian); 3411 break; 3412 } 3413 #endif 3414 if ((op->type & UPDATE) && size == sizeof(long) && 3415 op->reg == 1 && op->update_reg == 1 && 3416 !(regs->msr & MSR_PR) && 3417 ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { 3418 err = handle_stack_update(ea, regs); 3419 break; 3420 } 3421 if (unlikely(cross_endian)) 3422 do_byterev(&op->val, size); 3423 err = write_mem(op->val, ea, size, regs); 3424 break; 3425 3426 #ifdef CONFIG_PPC_FPU 3427 case STORE_FP: 3428 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) 3429 return 0; 3430 err = do_fp_store(op, ea, regs, cross_endian); 3431 break; 3432 #endif 3433 #ifdef CONFIG_ALTIVEC 3434 case STORE_VMX: 3435 if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) 3436 return 0; 3437 err = do_vec_store(op->reg, ea, size, regs, cross_endian); 3438 break; 3439 #endif 3440 #ifdef CONFIG_VSX 3441 case STORE_VSX: { 3442 unsigned long msrbit = MSR_VSX; 3443 3444 /* 3445 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX 3446 * when the target of the instruction is a vector register. 
3447 */ 3448 if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) 3449 msrbit = MSR_VEC; 3450 if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) 3451 return 0; 3452 err = do_vsx_store(op, ea, regs, cross_endian); 3453 break; 3454 } 3455 #endif 3456 case STORE_MULTI: 3457 if (!address_ok(regs, ea, size)) 3458 return -EFAULT; 3459 rd = op->reg; 3460 for (i = 0; i < size; i += 4) { 3461 unsigned int v32 = regs->gpr[rd]; 3462 3463 nb = size - i; 3464 if (nb > 4) 3465 nb = 4; 3466 if (unlikely(cross_endian)) 3467 v32 = byterev_4(v32); 3468 err = copy_mem_out((u8 *) &v32, ea, nb, regs); 3469 if (err) 3470 break; 3471 ea += 4; 3472 /* reg number wraps from 31 to 0 for stsw[ix] */ 3473 rd = (rd + 1) & 0x1f; 3474 } 3475 break; 3476 3477 default: 3478 return -EINVAL; 3479 } 3480 3481 if (err) 3482 return err; 3483 3484 if (op->type & UPDATE) 3485 regs->gpr[op->update_reg] = op->ea; 3486 3487 return 0; 3488 } 3489 NOKPROBE_SYMBOL(emulate_loadstore); 3490 3491 /* 3492 * Emulate instructions that cause a transfer of control, 3493 * loads and stores, and a few other instructions. 3494 * Returns 1 if the step was emulated, 0 if not, 3495 * or -1 if the instruction is one that should not be stepped, 3496 * such as an rfid, or a mtmsrd that would clear MSR_RI. 3497 */ 3498 int emulate_step(struct pt_regs *regs, struct ppc_inst instr) 3499 { 3500 struct instruction_op op; 3501 int r, err, type; 3502 unsigned long val; 3503 unsigned long ea; 3504 3505 r = analyse_instr(&op, regs, instr); 3506 if (r < 0) 3507 return r; 3508 if (r > 0) { 3509 emulate_update_regs(regs, &op); 3510 return 1; 3511 } 3512 3513 err = 0; 3514 type = GETTYPE(op.type); 3515 3516 if (OP_IS_LOAD_STORE(type)) { 3517 err = emulate_loadstore(regs, &op); 3518 if (err) 3519 return 0; 3520 goto instr_done; 3521 } 3522 3523 switch (type) { 3524 case CACHEOP: 3525 ea = truncate_if_32bit(regs->msr, op.ea); 3526 if (!address_ok(regs, ea, 8)) 3527 return 0; 3528 switch (op.type & CACHEOP_MASK) { 3529 case DCBST: 3530 __cacheop_user_asmx(ea, err, "dcbst"); 3531 break; 3532 case DCBF: 3533 __cacheop_user_asmx(ea, err, "dcbf"); 3534 break; 3535 case DCBTST: 3536 if (op.reg == 0) 3537 prefetchw((void *) ea); 3538 break; 3539 case DCBT: 3540 if (op.reg == 0) 3541 prefetch((void *) ea); 3542 break; 3543 case ICBI: 3544 __cacheop_user_asmx(ea, err, "icbi"); 3545 break; 3546 case DCBZ: 3547 err = emulate_dcbz(ea, regs); 3548 break; 3549 } 3550 if (err) { 3551 regs->dar = ea; 3552 return 0; 3553 } 3554 goto instr_done; 3555 3556 case MFMSR: 3557 regs->gpr[op.reg] = regs->msr & MSR_MASK; 3558 goto instr_done; 3559 3560 case MTMSR: 3561 val = regs->gpr[op.reg]; 3562 if ((val & MSR_RI) == 0) 3563 /* can't step mtmsr[d] that would clear MSR_RI */ 3564 return -1; 3565 /* here op.val is the mask of bits to change */ 3566 regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); 3567 goto instr_done; 3568 3569 #ifdef CONFIG_PPC64 3570 case SYSCALL: /* sc */ 3571 /* 3572 * N.B. this uses knowledge about how the syscall 3573 * entry code works. If that is changed, this will 3574 * need to be changed also. 
3575 */ 3576 if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && 3577 cpu_has_feature(CPU_FTR_REAL_LE) && 3578 regs->gpr[0] == 0x1ebe) { 3579 regs_set_return_msr(regs, regs->msr ^ MSR_LE); 3580 goto instr_done; 3581 } 3582 regs->gpr[9] = regs->gpr[13]; 3583 regs->gpr[10] = MSR_KERNEL; 3584 regs->gpr[11] = regs->nip + 4; 3585 regs->gpr[12] = regs->msr & MSR_MASK; 3586 regs->gpr[13] = (unsigned long) get_paca(); 3587 regs_set_return_ip(regs, (unsigned long) &system_call_common); 3588 regs_set_return_msr(regs, MSR_KERNEL); 3589 return 1; 3590 3591 #ifdef CONFIG_PPC_BOOK3S_64 3592 case SYSCALL_VECTORED_0: /* scv 0 */ 3593 regs->gpr[9] = regs->gpr[13]; 3594 regs->gpr[10] = MSR_KERNEL; 3595 regs->gpr[11] = regs->nip + 4; 3596 regs->gpr[12] = regs->msr & MSR_MASK; 3597 regs->gpr[13] = (unsigned long) get_paca(); 3598 regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate); 3599 regs_set_return_msr(regs, MSR_KERNEL); 3600 return 1; 3601 #endif 3602 3603 case RFI: 3604 return -1; 3605 #endif 3606 } 3607 return 0; 3608 3609 instr_done: 3610 regs_set_return_ip(regs, 3611 truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); 3612 return 1; 3613 } 3614 NOKPROBE_SYMBOL(emulate_step); 3615