// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 *  edited by Linus Torvalds
 *  ARM modifications Copyright (C) 2000 Russell King
 */
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>

#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
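
/*
 * Worked example (illustrative only, not part of the original source):
 * with the table above, regs_query_register_offset("r0") returns
 * offsetof(struct pt_regs, ARM_r0) == 0, and regs_query_register_name(60)
 * returns "pc", since ARM_pc is uregs[15] and 15 * sizeof(long) == 60 on
 * 32-bit ARM.  Callers such as the kprobe-based event tracer use these
 * helpers to translate register names in probe definitions into pt_regs
 * offsets.
 */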

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}

/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_BRKPT,
			(void __user *)instruction_pointer(regs));
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(regs);
	return 0;
}

static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0x0000de01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffffffff,
	.instr_val	= 0xf7f0a000,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};

static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
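
/*
 * Illustrative sketch (not part of the original source): a userspace
 * debugger typically plants one of the breakpoint encodings above with
 * PTRACE_POKETEXT and relies on the undef hooks registered here to turn
 * the resulting undefined-instruction exception into a SIGTRAP:
 *
 *	long old = ptrace(PTRACE_PEEKTEXT, pid, addr, 0);
 *	ptrace(PTRACE_POKETEXT, pid, addr, BREAKINST_ARM);
 *
 * then wait for the SIGTRAP stop, restore "old" and resume.  The encoding
 * chosen must match the instruction set the tracee is executing
 * (BREAKINST_ARM vs. BREAKINST_THUMB).
 */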

/*
 * Read the word at offset "off" into the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3)
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);
	else if (off >= sizeof(struct user))
		return -EIO;

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}

#ifdef CONFIG_IWMMXT

/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}

#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}

/*
 * Worked example of the numbering above (illustrative, derived from the
 * two helpers): register 0 is the resource-information word; registers
 * 1 and 2 are breakpoint 0's address and control (idx 0), 3 and 4 are
 * breakpoint 1's (idx 1), and so on.  Negative registers -1 and -2 are
 * watchpoint 0's address and control (idx ARM_MAX_BRP), -3 and -4 are
 * watchpoint 1's, etc.  Odd-numbered registers carry addresses,
 * even-numbered ones carry control words.
 */

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}

/*
 * Layout of the resource-information word built below (illustrative note,
 * not from the original source):
 *
 *	reg = (debug_arch << 24) | (max_wp_len << 16) |
 *	      (num_wrps   <<  8) |  num_brps;
 *
 * so a debugger reading virtual register 0 can unpack the debug
 * architecture version, the maximum watchpoint length and the number of
 * watchpoint/breakpoint slots from single bytes.
 */
static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);
	debug_arch = arch_get_debug_arch();
	wp_len = arch_get_max_wp_len();

	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	return reg;
}

static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
					   tsk);
}

static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}

static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len = gen_len;
		attr.bp_type = gen_type;
		attr.disabled = !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);
out:
	return ret;
}
#endif

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs = *task_pt_regs(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}

static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	return membuf_write(&to, &task_thread_info(target)->fpstate,
			    sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}

#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs));
	membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs));
	return membuf_store(&to, vfp->fpscr);
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);
	new_vfp = thread->vfpstate.hard;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */

enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value[0],
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
		case PTRACE_SETHBPREGS:
			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	unsigned long ip;

	/*
	 * IP is used to denote syscall entry/exit:
	 * IP = 0 -> entry, =1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		current_thread_info()->syscall = -1;

	regs->ARM_ip = ip;
}
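
/*
 * Tracer-side view of the convention above (illustrative sketch, not part
 * of the original source): at a syscall stop the debugger can distinguish
 * entry from exit by looking at r12 (ip) in the saved registers:
 *
 *	unsigned long regs[18];
 *	ptrace(PTRACE_GETREGS, pid, 0, regs);
 *	if (regs[12] == 0)
 *		handle_syscall_entry(pid, regs);  (number in r7 for EABI)
 *	else
 *		handle_syscall_exit(pid, regs);   (return value in r0)
 *
 * handle_syscall_entry/exit are hypothetical tracer helpers, not kernel API.
 */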

asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
{
	current_thread_info()->syscall = scno;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do seccomp after ptrace; syscall may have changed. */
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
	if (secure_computing() == -1)
		return -1;
#else
	/* XXX: remove this once OABI gets fixed */
	secure_computing_strict(current_thread_info()->syscall);
#endif

	/* Tracer or seccomp may have changed syscall. */
	scno = current_thread_info()->syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, scno);

	audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
			    regs->ARM_r3);

	return scno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	/*
	 * Audit the syscall before anything else, as a debugger may
	 * come in and change the current registers.
	 */
	audit_syscall_exit(regs);

	/*
	 * Note that we haven't updated the ->syscall field for the
	 * current thread. This isn't a problem because it will have
	 * been set on syscall entry and there hasn't been an opportunity
	 * for a PTRACE_SET_SYSCALL since then.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
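
/*
 * Usage note (illustrative, not part of the original source): a tracer
 * stopped at syscall entry may rewrite the pending syscall number with
 * PTRACE_SET_SYSCALL before resuming the tracee; syscall_trace_enter()
 * re-reads ->syscall after the ptrace report precisely so that such a
 * change (or a seccomp decision) takes effect.  Setting the number to -1
 * mirrors what tracehook_report_syscall() does when the entry report asks
 * for the syscall to be aborted.
 */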