/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U

#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif

/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}


static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}

/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}

#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */

/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
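/*
 * Worked example (illustrative, not from the original source): for the
 * D-form load "lwz r5,8(r4)", RA = 4 and the displacement is 8, so
 * dform_ea() returns GPR[4] + 8; with RA = 0 the base register is
 * omitted and the EA is just the sign-extended displacement.
 */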

/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}


static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif

static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}

static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
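/*
 * Worked example (illustrative): on big-endian, a 4-byte read at
 * ea = 0x1002 is split by max_align() into two naturally aligned
 * 2-byte reads at 0x1002 and 0x1004, accumulated MSB-first into x.
 * On little-endian the loop always reads single bytes and the result
 * is byte-reversed afterwards.
 */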

/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}

static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}

static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}

/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
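/*
 * Note (explanatory): write_mem_unaligned() emits the most significant
 * bytes of val first; the shift "val >> (nb - c) * 8" selects the top
 * c bytes of the nb bytes still to be written on each iteration.
 */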

#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}

static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif

#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}
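/*
 * Note (explanatory): when a VSX access is not word-aligned, the
 * 16-byte transfer above and below is split into two 8-byte
 * read_mem_unaligned()/write_mem_unaligned() calls through a
 * kernel-side buffer, and the vector register itself is accessed
 * via func.
 */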

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */

#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))

static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}

static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}

static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
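/*
 * Worked example (illustrative): trap_compare() returns the TO bits
 * that would fire, so for "tweq r3,r4" (TO field = 0x04) the trap is
 * taken exactly when the operands compare equal, since only then does
 * 0x04 & trap_compare() come out non-zero.
 */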

/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))

/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = (instr >> 21) & 0x1c;
			ra = (instr >> 16) & 0x1c;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
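		/*
		 * Note (explanatory): for bclr/bcctr the target is
		 * sampled from LR or CTR before LK updates regs->link,
		 * so "blrl" branches to the old LR value, matching the
		 * architected behaviour.
		 */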

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;

	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;
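	/*
	 * Note (explanatory): subtraction variants reuse
	 * add_with_carry() via the identity a - b = ~b + a + 1;
	 * e.g. subfic above passes ~GPR[RA] with carry_in = 1, and the
	 * carry-out test (val < val1) detects unsigned wraparound.
	 */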

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		break;	/* illegal instruction */

	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
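		/*
		 * Note (explanatory): for mtmsr/mtmsrd, analyse_instr()
		 * does not change the MSR itself; op->val is the mask of
		 * MSR bits the caller is allowed to modify, and
		 * emulate_step() applies it.
		 */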
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;

/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;

/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;
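		/*
		 * Note (explanatory): the mulh* cases above use inline
		 * asm because portable C has no direct "high half of the
		 * product" operator; the extended-arithmetic cases below
		 * feed XER[CA] back in through add_with_carry().
		 */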

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;


/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif

/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;
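		/*
		 * Note (explanatory): for the algebraic shifts below,
		 * XER[CA] is set only when the source is negative and
		 * 1-bits are shifted out of the low end; this is what
		 * lets the "srawi ...; addze" idiom implement a signed
		 * divide by a power of 2.
		 */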

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */

/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}

	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;
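		/*
		 * Note (explanatory): LARX/STCX ops are only classified
		 * here; emulate_step() must perform them with real
		 * lwarx/stwcx. (or ldarx/stdcx.) instructions so that
		 * the hardware reservation is established and checked.
		 */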

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;
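		/*
		 * Note (explanatory): for byte-reversed stores the value
		 * is pre-swapped into op->val here (see stwbrx above and
		 * sthbrx below), so emulate_step() can issue a plain
		 * store of op->val.
		 */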

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;

#endif /* CONFIG_VSX */
		}
		break;

	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif

#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */

	}
	return 0;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
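	/* fall through: arith_done relies on dropping into instr_done */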
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);

/*
 * On PPC32 we always use stwu with r1 to change the stack pointer,
 * so this emulated store may corrupt the exception frame.  We have to
 * provide the exception frame trampoline, which is pushed below the
 * kprobed function's stack, so we only update gpr[1] here and don't
 * emulate the actual store operation.  The real store is done safely
 * in the exception return code, which checks this flag.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check if we will touch kernel stack overflow
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check if the flag was already set, since that means we'll
	 * lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}

static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
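/*
 * Illustrative usage (not from this file): a single-step path such as
 * kprobes can try "if (emulate_step(regs, insn) > 0) return;" and fall
 * back to hardware single-stepping when 0 is returned, or refuse to
 * step at all when -1 is returned.
 */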

/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;

	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;

	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;

	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;

	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;

 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}