/* ptrace.c: Sparc process tracing support.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
 * and David Mosberger.
 *
 * Added Linux support -miguel (weird, eh?, the original code was meant
 * to emulate SunOS).
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <trace/syscall.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/context_tracking.h>

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#include "entry.h"

/* #define ALLOW_INIT_TRACING */

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
        /* nothing to do - sparc64 keeps no per-thread single-step
         * hardware state that would need clearing on detach.
         */
}

/* To get the necessary page struct, access_process_vm() first calls
 * get_user_pages().  This has done a flush_dcache_page() on the
 * accessed page.  Then our caller (copy_{to,from}_user_page()) did
 * a memcpy to read/write the data from that page.
 *
 * Now, the only thing we have to do is:
 * 1) flush the D-cache if it's possible that an illegal alias
 *    has been created
 * 2) flush the I-cache if this is pre-cheetah and we did a write
 */
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr,
                         unsigned long len, int write)
{
        BUG_ON(len > PAGE_SIZE);

        /* sun4v (hypervisor) maintains cache coherency for us */
        if (tlb_type == hypervisor)
                return;

        preempt_disable();

#ifdef DCACHE_ALIASING_POSSIBLE
        /* If bit 13 of the kernel address we used to access the
         * user page is the same as the virtual address that page
         * is mapped to in the user's address space, we can skip the
         * D-cache flush.
         */
        if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
                unsigned long start = __pa(kaddr);
                unsigned long end = start + len;
                unsigned long dcache_line_size;

                dcache_line_size = local_cpu_data().dcache_line_size;

                if (tlb_type == spitfire) {
                        for (; start < end; start += dcache_line_size)
                                spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
                } else {
                        start &= ~(dcache_line_size - 1);
                        for (; start < end; start += dcache_line_size)
                                __asm__ __volatile__(
                                        "stxa %%g0, [%0] %1\n\t"
                                        "membar #Sync"
                                        : /* no outputs */
                                        : "r" (start),
                                        "i" (ASI_DCACHE_INVALIDATE));
                }
        }
#endif
        /* Pre-cheetah chips lack coherent I-cache, so flush it by
         * hand after a write.
         */
        if (write && tlb_type == spitfire) {
                unsigned long start = (unsigned long) kaddr;
                unsigned long end = start + len;
                unsigned long icache_line_size;

                icache_line_size = local_cpu_data().icache_line_size;

                for (; start < end; start += icache_line_size)
                        flushi(start);
        }

        preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_ptrace_access);

/* Read LEN bytes at UADDR in TARGET's address space into KBUF.
 * Uses copy_from_user() when TARGET is the current task and
 * access_process_vm() otherwise.  Returns 0 or -EFAULT.
 */
static int get_from_target(struct task_struct *target, unsigned long uaddr,
                           void *kbuf, int len)
{
        if (target == current) {
                if (copy_from_user(kbuf, (void __user *) uaddr, len))
                        return -EFAULT;
        } else {
                int len2 = access_process_vm(target, uaddr, kbuf, len,
                                             FOLL_FORCE);
                if (len2 != len)
                        return -EFAULT;
        }
        return 0;
}

/* Write LEN bytes from KBUF to UADDR in TARGET's address space.
 * Counterpart of get_from_target().  Returns 0 or -EFAULT.
 */
static int set_to_target(struct task_struct *target, unsigned long uaddr,
                         void *kbuf, int len)
{
        if (target == current) {
                if (copy_to_user((void __user *) uaddr, kbuf, len))
                        return -EFAULT;
        } else {
                int len2 = access_process_vm(target, uaddr, kbuf, len,
                                             FOLL_FORCE | FOLL_WRITE);
                if (len2 != len)
                        return -EFAULT;
        }
        return 0;
}

/* Fetch TARGET's current register window (locals + ins) from its
 * stack save area into WBUF, widening from 32-bit slots when the
 * task is running on a 32-bit stack.
 */
static int regwindow64_get(struct task_struct *target,
                           const struct pt_regs *regs,
                           struct reg_window *wbuf)
{
        unsigned long rw_addr = regs->u_regs[UREG_I6];

        if (!test_thread_64bit_stack(rw_addr)) {
                struct reg_window32 win32;
                int i;

                if (get_from_target(target, rw_addr, &win32, sizeof(win32)))
                        return -EFAULT;
                for (i = 0; i < 8; i++)
                        wbuf->locals[i] = win32.locals[i];
                for (i = 0; i < 8; i++)
                        wbuf->ins[i] = win32.ins[i];
        } else {
                /* 64-bit frames live at %sp + STACK_BIAS */
                rw_addr += STACK_BIAS;
                if (get_from_target(target, rw_addr, wbuf, sizeof(*wbuf)))
                        return -EFAULT;
        }

        return 0;
}

/* Store WBUF back into TARGET's register window save area on its
 * stack, narrowing to 32-bit slots on a 32-bit stack.
 */
static int regwindow64_set(struct task_struct *target,
                           const struct pt_regs *regs,
                           struct reg_window *wbuf)
{
        unsigned long rw_addr = regs->u_regs[UREG_I6];

        if (!test_thread_64bit_stack(rw_addr)) {
                struct reg_window32 win32;
                int i;

                for (i = 0; i < 8; i++)
                        win32.locals[i] = wbuf->locals[i];
                for (i = 0; i < 8; i++)
                        win32.ins[i] = wbuf->ins[i];

                if (set_to_target(target, rw_addr, &win32, sizeof(win32)))
                        return -EFAULT;
        } else {
                /* 64-bit frames live at %sp + STACK_BIAS */
                rw_addr += STACK_BIAS;
                if (set_to_target(target, rw_addr, wbuf, sizeof(*wbuf)))
                        return -EFAULT;
        }

        return 0;
}

enum sparc_regset {
        REGSET_GENERAL,
        REGSET_FP,
};

/* 64-bit general register regset get:
 * G0-G7, O0-O7 from pt_regs; L0-L7, I0-I7 from the stack window
 * save area; then TSTATE, TPC, TNPC, Y.
 */
static int genregs64_get(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         void *kbuf, void
__user *ubuf)
{
        const struct pt_regs *regs = task_pt_regs(target);
        int ret;

        /* Make sure the register windows have been spilled to the
         * stack before we read them from there.
         */
        if (target == current)
                flushw_user();

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  regs->u_regs,
                                  0, 16 * sizeof(u64));
        if (!ret && count && pos < (32 * sizeof(u64))) {
                /* L0-L7 and I0-I7 come from the register window save
                 * area on the target's stack.
                 */
                struct reg_window window;

                if (regwindow64_get(target, regs, &window))
                        return -EFAULT;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &window,
                                          16 * sizeof(u64),
                                          32 * sizeof(u64));
        }

        if (!ret) {
                /* TSTATE, TPC, TNPC */
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &regs->tstate,
                                          32 * sizeof(u64),
                                          35 * sizeof(u64));
        }

        if (!ret) {
                unsigned long y = regs->y;

                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &y,
                                          35 * sizeof(u64),
                                          36 * sizeof(u64));
        }

        if (!ret) {
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               36 * sizeof(u64), -1);

        }
        return ret;
}

/* 64-bit general register regset set: counterpart of genregs64_get(). */
static int genregs64_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        struct pt_regs *regs = task_pt_regs(target);
        int ret;

        if (target == current)
                flushw_user();

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 regs->u_regs,
                                 0, 16 * sizeof(u64));
        if (!ret && count && pos < (32 * sizeof(u64))) {
                /* Read-modify-write of the window save area: fetch
                 * it, overlay the supplied range, and push it back.
                 */
                struct reg_window window;

                if (regwindow64_get(target, regs, &window))
                        return -EFAULT;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &window,
                                         16 * sizeof(u64),
                                         32 * sizeof(u64));

                if (!ret &&
                    regwindow64_set(target, regs, &window))
                        return -EFAULT;
        }

        if (!ret && count > 0) {
                unsigned long tstate;

                /* TSTATE */
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &tstate,
                                         32 * sizeof(u64),
                                         33 * sizeof(u64));
                if (!ret) {
                        /* Only the condition codes and the "in syscall"
                         * state can be modified in the %tstate register.
                         */
                        tstate &= (TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
                        regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL);
                        regs->tstate |= tstate;
                }
        }

        if (!ret) {
                /* TPC, TNPC */
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &regs->tpc,
                                         33 * sizeof(u64),
                                         35 * sizeof(u64));
        }

        if (!ret) {
                unsigned long y;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &y,
                                         35 * sizeof(u64),
                                         36 * sizeof(u64));
                if (!ret)
                        regs->y = y;
        }

        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                36 * sizeof(u64), -1);

        return ret;
}

/* 64-bit FP regset get: F0-F63, FSR, GSR, FPRS.  Halves of the FP
 * register file that were never saved (per FPRS_DL/FPRS_DU) read as
 * zero.
 */
static int fpregs64_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        const unsigned long *fpregs = task_thread_info(target)->fpregs;
        unsigned long fprs, fsr, gsr;
        int ret;

        if (target == current)
                save_and_clear_fpu();

        fprs = task_thread_info(target)->fpsaved[0];

        if (fprs & FPRS_DL)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          fpregs,
                                          0, 16 * sizeof(u64));
        else
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               0,
                                               16 * sizeof(u64));

        if (!ret) {
                if (fprs & FPRS_DU)
                        ret = user_regset_copyout(&pos, &count,
                                                  &kbuf, &ubuf,
                                                  fpregs + 16,
                                                  16 * sizeof(u64),
                                                  32 * sizeof(u64));
                else
                        ret = user_regset_copyout_zero(&pos, &count,
                                                       &kbuf, &ubuf,
                                                       16 * sizeof(u64),
                                                       32 * sizeof(u64));
        }

        /* FSR and GSR are only valid if the FPU was enabled */
        if (fprs & FPRS_FEF) {
                fsr = task_thread_info(target)->xfsr[0];
                gsr = task_thread_info(target)->gsr[0];
        } else {
                fsr = gsr = 0;
        }

        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &fsr,
                                          32 * sizeof(u64),
                                          33 * sizeof(u64));
        if (!ret)
                ret =
user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &gsr,
                                          33 * sizeof(u64),
                                          34 * sizeof(u64));
        if (!ret)
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                          &fprs,
                                          34 * sizeof(u64),
                                          35 * sizeof(u64));

        if (!ret)
                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               35 * sizeof(u64), -1);

        return ret;
}

/* 64-bit FP regset set: counterpart of fpregs64_get().  Marks the
 * full FP state (FEF|DL|DU) as saved so the new contents are loaded
 * on the next return to user mode.
 */
static int fpregs64_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned long *fpregs = task_thread_info(target)->fpregs;
        unsigned long fprs;
        int ret;

        if (target == current)
                save_and_clear_fpu();

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 fpregs,
                                 0, 32 * sizeof(u64));
        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         task_thread_info(target)->xfsr,
                                         32 * sizeof(u64),
                                         33 * sizeof(u64));
        if (!ret)
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         task_thread_info(target)->gsr,
                                         33 * sizeof(u64),
                                         34 * sizeof(u64));

        fprs = task_thread_info(target)->fpsaved[0];
        if (!ret && count > 0) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                         &fprs,
                                         34 * sizeof(u64),
                                         35 * sizeof(u64));
        }

        fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
        task_thread_info(target)->fpsaved[0] = fprs;

        if (!ret)
                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                35 * sizeof(u64), -1);
        return ret;
}

static const struct user_regset sparc64_regsets[] = {
        /* Format is:
         *      G0 --> G7
         *      O0 --> O7
         *      L0 --> L7
         *      I0 --> I7
         *      TSTATE, TPC, TNPC, Y
         */
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = 36,
                .size = sizeof(u64), .align = sizeof(u64),
                .get = genregs64_get, .set = genregs64_set
        },
        /* Format is:
         *      F0 --> F63
         *      FSR
         *      GSR
         *      FPRS
         */
        [REGSET_FP] = {
.core_note_type = NT_PRFPREG, 465 .n = 35, 466 .size = sizeof(u64), .align = sizeof(u64), 467 .get = fpregs64_get, .set = fpregs64_set 468 }, 469 }; 470 471 static const struct user_regset_view user_sparc64_view = { 472 .name = "sparc64", .e_machine = EM_SPARCV9, 473 .regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets) 474 }; 475 476 #ifdef CONFIG_COMPAT 477 static int genregs32_get(struct task_struct *target, 478 const struct user_regset *regset, 479 unsigned int pos, unsigned int count, 480 void *kbuf, void __user *ubuf) 481 { 482 const struct pt_regs *regs = task_pt_regs(target); 483 compat_ulong_t __user *reg_window; 484 compat_ulong_t *k = kbuf; 485 compat_ulong_t __user *u = ubuf; 486 compat_ulong_t reg; 487 488 if (target == current) 489 flushw_user(); 490 491 pos /= sizeof(reg); 492 count /= sizeof(reg); 493 494 if (kbuf) { 495 for (; count > 0 && pos < 16; count--) 496 *k++ = regs->u_regs[pos++]; 497 498 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 499 reg_window -= 16; 500 if (target == current) { 501 for (; count > 0 && pos < 32; count--) { 502 if (get_user(*k++, ®_window[pos++])) 503 return -EFAULT; 504 } 505 } else { 506 for (; count > 0 && pos < 32; count--) { 507 if (access_process_vm(target, 508 (unsigned long) 509 ®_window[pos], 510 k, sizeof(*k), 511 FOLL_FORCE) 512 != sizeof(*k)) 513 return -EFAULT; 514 k++; 515 pos++; 516 } 517 } 518 } else { 519 for (; count > 0 && pos < 16; count--) { 520 if (put_user((compat_ulong_t) regs->u_regs[pos++], u++)) 521 return -EFAULT; 522 } 523 524 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 525 reg_window -= 16; 526 if (target == current) { 527 for (; count > 0 && pos < 32; count--) { 528 if (get_user(reg, ®_window[pos++]) || 529 put_user(reg, u++)) 530 return -EFAULT; 531 } 532 } else { 533 for (; count > 0 && pos < 32; count--) { 534 if (access_process_vm(target, 535 (unsigned long) 536 ®_window[pos], 537 ®, sizeof(reg), 538 FOLL_FORCE) 539 != sizeof(reg)) 540 return 
-EFAULT; 541 if (access_process_vm(target, 542 (unsigned long) u, 543 ®, sizeof(reg), 544 FOLL_FORCE | FOLL_WRITE) 545 != sizeof(reg)) 546 return -EFAULT; 547 pos++; 548 u++; 549 } 550 } 551 } 552 while (count > 0) { 553 switch (pos) { 554 case 32: /* PSR */ 555 reg = tstate_to_psr(regs->tstate); 556 break; 557 case 33: /* PC */ 558 reg = regs->tpc; 559 break; 560 case 34: /* NPC */ 561 reg = regs->tnpc; 562 break; 563 case 35: /* Y */ 564 reg = regs->y; 565 break; 566 case 36: /* WIM */ 567 case 37: /* TBR */ 568 reg = 0; 569 break; 570 default: 571 goto finish; 572 } 573 574 if (kbuf) 575 *k++ = reg; 576 else if (put_user(reg, u++)) 577 return -EFAULT; 578 pos++; 579 count--; 580 } 581 finish: 582 pos *= sizeof(reg); 583 count *= sizeof(reg); 584 585 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 586 38 * sizeof(reg), -1); 587 } 588 589 static int genregs32_set(struct task_struct *target, 590 const struct user_regset *regset, 591 unsigned int pos, unsigned int count, 592 const void *kbuf, const void __user *ubuf) 593 { 594 struct pt_regs *regs = task_pt_regs(target); 595 compat_ulong_t __user *reg_window; 596 const compat_ulong_t *k = kbuf; 597 const compat_ulong_t __user *u = ubuf; 598 compat_ulong_t reg; 599 600 if (target == current) 601 flushw_user(); 602 603 pos /= sizeof(reg); 604 count /= sizeof(reg); 605 606 if (kbuf) { 607 for (; count > 0 && pos < 16; count--) 608 regs->u_regs[pos++] = *k++; 609 610 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 611 reg_window -= 16; 612 if (target == current) { 613 for (; count > 0 && pos < 32; count--) { 614 if (put_user(*k++, ®_window[pos++])) 615 return -EFAULT; 616 } 617 } else { 618 for (; count > 0 && pos < 32; count--) { 619 if (access_process_vm(target, 620 (unsigned long) 621 ®_window[pos], 622 (void *) k, 623 sizeof(*k), 624 FOLL_FORCE | FOLL_WRITE) 625 != sizeof(*k)) 626 return -EFAULT; 627 k++; 628 pos++; 629 } 630 } 631 } else { 632 for (; count > 0 && pos < 16; count--) { 633 
if (get_user(reg, u++)) 634 return -EFAULT; 635 regs->u_regs[pos++] = reg; 636 } 637 638 reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6]; 639 reg_window -= 16; 640 if (target == current) { 641 for (; count > 0 && pos < 32; count--) { 642 if (get_user(reg, u++) || 643 put_user(reg, ®_window[pos++])) 644 return -EFAULT; 645 } 646 } else { 647 for (; count > 0 && pos < 32; count--) { 648 if (access_process_vm(target, 649 (unsigned long) 650 u, 651 ®, sizeof(reg), 652 FOLL_FORCE) 653 != sizeof(reg)) 654 return -EFAULT; 655 if (access_process_vm(target, 656 (unsigned long) 657 ®_window[pos], 658 ®, sizeof(reg), 659 FOLL_FORCE | FOLL_WRITE) 660 != sizeof(reg)) 661 return -EFAULT; 662 pos++; 663 u++; 664 } 665 } 666 } 667 while (count > 0) { 668 unsigned long tstate; 669 670 if (kbuf) 671 reg = *k++; 672 else if (get_user(reg, u++)) 673 return -EFAULT; 674 675 switch (pos) { 676 case 32: /* PSR */ 677 tstate = regs->tstate; 678 tstate &= ~(TSTATE_ICC | TSTATE_XCC | TSTATE_SYSCALL); 679 tstate |= psr_to_tstate_icc(reg); 680 if (reg & PSR_SYSCALL) 681 tstate |= TSTATE_SYSCALL; 682 regs->tstate = tstate; 683 break; 684 case 33: /* PC */ 685 regs->tpc = reg; 686 break; 687 case 34: /* NPC */ 688 regs->tnpc = reg; 689 break; 690 case 35: /* Y */ 691 regs->y = reg; 692 break; 693 case 36: /* WIM */ 694 case 37: /* TBR */ 695 break; 696 default: 697 goto finish; 698 } 699 700 pos++; 701 count--; 702 } 703 finish: 704 pos *= sizeof(reg); 705 count *= sizeof(reg); 706 707 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 708 38 * sizeof(reg), -1); 709 } 710 711 static int fpregs32_get(struct task_struct *target, 712 const struct user_regset *regset, 713 unsigned int pos, unsigned int count, 714 void *kbuf, void __user *ubuf) 715 { 716 const unsigned long *fpregs = task_thread_info(target)->fpregs; 717 compat_ulong_t enabled; 718 unsigned long fprs; 719 compat_ulong_t fsr; 720 int ret = 0; 721 722 if (target == current) 723 save_and_clear_fpu(); 724 725 fprs 
= task_thread_info(target)->fpsaved[0]; 726 if (fprs & FPRS_FEF) { 727 fsr = task_thread_info(target)->xfsr[0]; 728 enabled = 1; 729 } else { 730 fsr = 0; 731 enabled = 0; 732 } 733 734 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 735 fpregs, 736 0, 32 * sizeof(u32)); 737 738 if (!ret) 739 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 740 32 * sizeof(u32), 741 33 * sizeof(u32)); 742 if (!ret) 743 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 744 &fsr, 745 33 * sizeof(u32), 746 34 * sizeof(u32)); 747 748 if (!ret) { 749 compat_ulong_t val; 750 751 val = (enabled << 8) | (8 << 16); 752 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 753 &val, 754 34 * sizeof(u32), 755 35 * sizeof(u32)); 756 } 757 758 if (!ret) 759 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, 760 35 * sizeof(u32), -1); 761 762 return ret; 763 } 764 765 static int fpregs32_set(struct task_struct *target, 766 const struct user_regset *regset, 767 unsigned int pos, unsigned int count, 768 const void *kbuf, const void __user *ubuf) 769 { 770 unsigned long *fpregs = task_thread_info(target)->fpregs; 771 unsigned long fprs; 772 int ret; 773 774 if (target == current) 775 save_and_clear_fpu(); 776 777 fprs = task_thread_info(target)->fpsaved[0]; 778 779 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 780 fpregs, 781 0, 32 * sizeof(u32)); 782 if (!ret) 783 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 784 32 * sizeof(u32), 785 33 * sizeof(u32)); 786 if (!ret && count > 0) { 787 compat_ulong_t fsr; 788 unsigned long val; 789 790 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 791 &fsr, 792 33 * sizeof(u32), 793 34 * sizeof(u32)); 794 if (!ret) { 795 val = task_thread_info(target)->xfsr[0]; 796 val &= 0xffffffff00000000UL; 797 val |= fsr; 798 task_thread_info(target)->xfsr[0] = val; 799 } 800 } 801 802 fprs |= (FPRS_FEF | FPRS_DL); 803 task_thread_info(target)->fpsaved[0] = fprs; 804 805 if (!ret) 806 ret = user_regset_copyin_ignore(&pos, 
&count, &kbuf, &ubuf, 807 34 * sizeof(u32), -1); 808 return ret; 809 } 810 811 static const struct user_regset sparc32_regsets[] = { 812 /* Format is: 813 * G0 --> G7 814 * O0 --> O7 815 * L0 --> L7 816 * I0 --> I7 817 * PSR, PC, nPC, Y, WIM, TBR 818 */ 819 [REGSET_GENERAL] = { 820 .core_note_type = NT_PRSTATUS, 821 .n = 38, 822 .size = sizeof(u32), .align = sizeof(u32), 823 .get = genregs32_get, .set = genregs32_set 824 }, 825 /* Format is: 826 * F0 --> F31 827 * empty 32-bit word 828 * FSR (32--bit word) 829 * FPU QUEUE COUNT (8-bit char) 830 * FPU QUEUE ENTRYSIZE (8-bit char) 831 * FPU ENABLED (8-bit char) 832 * empty 8-bit char 833 * FPU QUEUE (64 32-bit ints) 834 */ 835 [REGSET_FP] = { 836 .core_note_type = NT_PRFPREG, 837 .n = 99, 838 .size = sizeof(u32), .align = sizeof(u32), 839 .get = fpregs32_get, .set = fpregs32_set 840 }, 841 }; 842 843 static const struct user_regset_view user_sparc32_view = { 844 .name = "sparc", .e_machine = EM_SPARC, 845 .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets) 846 }; 847 #endif /* CONFIG_COMPAT */ 848 849 const struct user_regset_view *task_user_regset_view(struct task_struct *task) 850 { 851 #ifdef CONFIG_COMPAT 852 if (test_tsk_thread_flag(task, TIF_32BIT)) 853 return &user_sparc32_view; 854 #endif 855 return &user_sparc64_view; 856 } 857 858 #ifdef CONFIG_COMPAT 859 struct compat_fps { 860 unsigned int regs[32]; 861 unsigned int fsr; 862 unsigned int flags; 863 unsigned int extra; 864 unsigned int fpqd; 865 struct compat_fq { 866 unsigned int insnaddr; 867 unsigned int insn; 868 } fpq[16]; 869 }; 870 871 long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 872 compat_ulong_t caddr, compat_ulong_t cdata) 873 { 874 const struct user_regset_view *view = task_user_regset_view(current); 875 compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4]; 876 struct pt_regs32 __user *pregs; 877 struct compat_fps __user *fps; 878 unsigned long addr2 = caddr2; 879 unsigned long addr = caddr; 880 
unsigned long data = cdata;
        int ret;

        pregs = (struct pt_regs32 __user *) addr;
        fps = (struct compat_fps __user *) addr;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = (addr != 0) ? -EIO : 0;
                break;

        case PTRACE_GETREGS:
                ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                          32 * sizeof(u32),
                                          4 * sizeof(u32),
                                          &pregs->psr);
                if (!ret)
                        ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                                  1 * sizeof(u32),
                                                  15 * sizeof(u32),
                                                  &pregs->u_regs[0]);
                break;

        case PTRACE_SETREGS:
                ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                            32 * sizeof(u32),
                                            4 * sizeof(u32),
                                            &pregs->psr);
                if (!ret)
                        ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                                    1 * sizeof(u32),
                                                    15 * sizeof(u32),
                                                    &pregs->u_regs[0]);
                break;

        case PTRACE_GETFPREGS:
                ret = copy_regset_to_user(child, view, REGSET_FP,
                                          0 * sizeof(u32),
                                          32 * sizeof(u32),
                                          &fps->regs[0]);
                if (!ret)
                        ret = copy_regset_to_user(child, view, REGSET_FP,
                                                  33 * sizeof(u32),
                                                  1 * sizeof(u32),
                                                  &fps->fsr);
                if (!ret) {
                        /* The FP queue is never exposed; report it
                         * empty to the tracer.
                         */
                        if (__put_user(0, &fps->flags) ||
                            __put_user(0, &fps->extra) ||
                            __put_user(0, &fps->fpqd) ||
                            clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
                                ret = -EFAULT;
                }
                break;

        case PTRACE_SETFPREGS:
                ret = copy_regset_from_user(child, view, REGSET_FP,
                                            0 * sizeof(u32),
                                            32 * sizeof(u32),
                                            &fps->regs[0]);
                if (!ret)
                        ret = copy_regset_from_user(child, view, REGSET_FP,
                                                    33 * sizeof(u32),
                                                    1 * sizeof(u32),
                                                    &fps->fsr);
                break;

        case PTRACE_READTEXT:
        case PTRACE_READDATA:
                ret = ptrace_readdata(child, addr,
                                      (char __user *)addr2, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
                        ret = -EIO;
                break;

        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
                ret = ptrace_writedata(child, (char __user *) addr2,
                                       addr, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
                        ret = -EIO;
                break;

        default:
                if (request == PTRACE_SPARC_DETACH)
                        request = PTRACE_DETACH;
                ret = compat_ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */

/* User-visible FP state layout for 64-bit PTRACE_{GET,SET}FPREGS64 */
struct fps {
        unsigned int regs[64];
        unsigned long fsr;
};

/* Handle the sparc-specific 64-bit ptrace requests; everything else
 * falls through to ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        const struct user_regset_view *view = task_user_regset_view(current);
        unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
        struct pt_regs __user *pregs;
        struct fps __user *fps;
        void __user *addr2p;
        int ret;

        pregs = (struct pt_regs __user *) addr;
        fps = (struct fps __user *) addr;
        addr2p = (void __user *) addr2;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = (addr != 0) ? -EIO : 0;
                break;

        case PTRACE_GETREGS64:
                ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                          1 * sizeof(u64),
                                          15 * sizeof(u64),
                                          &pregs->u_regs[0]);
                if (!ret) {
                        /* XXX doesn't handle 'y' register correctly XXX */
                        ret = copy_regset_to_user(child, view, REGSET_GENERAL,
                                                  32 * sizeof(u64),
                                                  4 * sizeof(u64),
                                                  &pregs->tstate);
                }
                break;

        case PTRACE_SETREGS64:
                ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                            1 * sizeof(u64),
                                            15 * sizeof(u64),
                                            &pregs->u_regs[0]);
                if (!ret) {
                        /* XXX doesn't handle 'y' register correctly XXX */
                        ret = copy_regset_from_user(child, view, REGSET_GENERAL,
                                                    32 * sizeof(u64),
                                                    4 * sizeof(u64),
                                                    &pregs->tstate);
                }
                break;

        case PTRACE_GETFPREGS64:
                ret = copy_regset_to_user(child, view, REGSET_FP,
                                          0 * sizeof(u64),
                                          33 * sizeof(u64),
                                          fps);
                break;

        case PTRACE_SETFPREGS64:
                ret = copy_regset_from_user(child, view, REGSET_FP,
                                            0 * sizeof(u64),
                                            33 * sizeof(u64),
                                            fps);
                break;

        case PTRACE_READTEXT:
        case PTRACE_READDATA:
                ret = ptrace_readdata(child, addr, addr2p, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
                        ret = -EIO;
                break;

        case PTRACE_WRITETEXT:
        case PTRACE_WRITEDATA:
                ret = ptrace_writedata(child, addr2p, addr, data);
                if (ret == data)
                        ret = 0;
                else if (ret >= 0)
                        ret = -EIO;
                break;

        default:
                if (request == PTRACE_SPARC_DETACH)
                        request = PTRACE_DETACH;
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}

/* Called on syscall entry when any trace/audit thread flag is set;
 * returns nonzero if the syscall should be aborted.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
        int ret = 0;

        /* do the secure computing check first */
        secure_computing_strict(regs->u_regs[UREG_G1]);

        if (test_thread_flag(TIF_NOHZ))
                user_exit();

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                ret = tracehook_report_syscall_entry(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->u_regs[UREG_G1]);

        audit_syscall_entry(regs->u_regs[UREG_G1], regs->u_regs[UREG_I0],
                            regs->u_regs[UREG_I1], regs->u_regs[UREG_I2],
                            regs->u_regs[UREG_I3]);

        return ret;
}

/* Called on syscall exit when any trace/audit thread flag is set. */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_NOHZ))
                user_exit();

        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->u_regs[UREG_I0]);

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, 0);

        if (test_thread_flag(TIF_NOHZ))
                user_enter();
}