/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
{
        struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
        siginfo_t info = {
                .si_signo = SIGTRAP,
                .si_errno = 0,
                .si_code  = TRAP_HWBKPT,
                .si_addr  = (void __user *)(bkpt->trigger),
        };

#ifdef CONFIG_COMPAT
        int i;

        if (!is_compat_task())
                goto send_sig;

        for (i = 0; i < ARM_MAX_BRP; ++i) {
                if (current->thread.debug.hbp_break[i] == bp) {
                        info.si_errno = (i << 1) + 1;
                        break;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; ++i) {
                if (current->thread.debug.hbp_watch[i] == bp) {
                        info.si_errno = -((i << 1) + 1);
                        break;
                }
        }

send_sig:
#endif
        force_sig_info(SIGTRAP, &info, current);
}
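
/*
 * Illustrative note (an interpretation derived only from the encoding in
 * ptrace_hbptriggered above): for a compat (AArch32) tracee, si_errno
 * identifies the slot that fired using the virtual register numbering of
 * the compat GET/SETHBPREGS interface, e.g.
 *
 *      breakpoint slot 0 -> si_errno  1
 *      breakpoint slot 1 -> si_errno  3
 *      watchpoint slot 0 -> si_errno -1
 *      watchpoint slot 1 -> si_errno -3
 *
 * Native (AArch64) tracees only receive si_addr; si_errno stays 0.
 */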

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < ARM_MAX_BRP; i++) {
                if (t->debug.hbp_break[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_break[i]);
                        t->debug.hbp_break[i] = NULL;
                }
        }

        for (i = 0; i < ARM_MAX_WRP; i++) {
                if (t->debug.hbp_watch[i]) {
                        unregister_hw_breakpoint(t->debug.hbp_watch[i]);
                        t->debug.hbp_watch[i] = NULL;
                }
        }
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
                                               struct task_struct *tsk,
                                               unsigned long idx)
{
        struct perf_event *bp = ERR_PTR(-EINVAL);

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP)
                        bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP)
                        bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }

        return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
                                struct task_struct *tsk,
                                unsigned long idx,
                                struct perf_event *bp)
{
        int err = -EINVAL;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if (idx < ARM_MAX_BRP) {
                        tsk->thread.debug.hbp_break[idx] = bp;
                        err = 0;
                }
                break;
        case NT_ARM_HW_WATCH:
                if (idx < ARM_MAX_WRP) {
                        tsk->thread.debug.hbp_watch[idx] = bp;
                        err = 0;
                }
                break;
        }

        return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
                                            struct task_struct *tsk,
                                            unsigned long idx)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        int err, type;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                type = HW_BREAKPOINT_X;
                break;
        case NT_ARM_HW_WATCH:
                type = HW_BREAKPOINT_RW;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        ptrace_breakpoint_init(&attr);

        /*
         * Initialise fields to sane defaults
         * (i.e. values that will pass validation).
         */
        attr.bp_addr = 0;
        attr.bp_len = HW_BREAKPOINT_LEN_4;
        attr.bp_type = type;
        attr.disabled = 1;

        bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
        if (IS_ERR(bp))
                return bp;

        err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
        if (err)
                return ERR_PTR(err);

        return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
                                     struct arch_hw_breakpoint_ctrl ctrl,
                                     struct perf_event_attr *attr)
{
        int err, len, type, disabled = !ctrl.enabled;

        attr->disabled = disabled;
        if (disabled)
                return 0;

        err = arch_bp_generic_fields(ctrl, &len, &type);
        if (err)
                return err;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                if ((type & HW_BREAKPOINT_X) != type)
                        return -EINVAL;
                break;
        case NT_ARM_HW_WATCH:
                if ((type & HW_BREAKPOINT_RW) != type)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        attr->bp_len = len;
        attr->bp_type = type;

        return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
        u8 num;
        u32 reg = 0;

        switch (note_type) {
        case NT_ARM_HW_BREAK:
                num = hw_breakpoint_slots(TYPE_INST);
                break;
        case NT_ARM_HW_WATCH:
                num = hw_breakpoint_slots(TYPE_DATA);
                break;
        default:
                return -EINVAL;
        }

        reg |= debug_monitors_arch();
        reg <<= 8;
        reg |= num;

        *info = reg;
        return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 *ctrl)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
        return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 *addr)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (IS_ERR(bp))
                return PTR_ERR(bp);

        *addr = bp ? bp->attr.bp_addr : 0;
        return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
                                                        struct task_struct *tsk,
                                                        unsigned long idx)
{
        struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

        if (!bp)
                bp = ptrace_hbp_create(note_type, tsk, idx);

        return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u32 uctrl)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint_ctrl ctrl;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        decode_ctrl_reg(uctrl, &ctrl);
        err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
        if (err)
                return err;

        return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
                               struct task_struct *tsk,
                               unsigned long idx,
                               u64 addr)
{
        int err;
        struct perf_event *bp;
        struct perf_event_attr attr;

        bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
        if (IS_ERR(bp)) {
                err = PTR_ERR(bp);
                return err;
        }

        attr = bp->attr;
        attr.bp_addr = addr;
        err = modify_user_hw_breakpoint(bp, &attr);
        return err;
}

#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_PAD_SZ sizeof(u32)

static int hw_break_get(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        void *kbuf, void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 info, ctrl;
        u64 addr;

        /* Resource info */
        ret = ptrace_hbp_get_resource_info(note_type, &info);
        if (ret)
                return ret;

        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
                                  sizeof(info));
        if (ret)
                return ret;

        /* Pad */
        offset = offsetof(struct user_hwdebug_state, pad);
        ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
                                       offset + PTRACE_HBP_PAD_SZ);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
                                          offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
                if (ret)
                        return ret;
                ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
                                          offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
                                               offset,
                                               offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}

static int hw_break_set(struct task_struct *target,
                        const struct user_regset *regset,
                        unsigned int pos, unsigned int count,
                        const void *kbuf, const void __user *ubuf)
{
        unsigned int note_type = regset->core_note_type;
        int ret, idx = 0, offset, limit;
        u32 ctrl;
        u64 addr;

        /* Resource info and pad */
        offset = offsetof(struct user_hwdebug_state, dbg_regs);
        ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
        if (ret)
                return ret;

        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;

                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
                        return ret;
                ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_CTRL_SZ;

                ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
                                                offset,
                                                offset + PTRACE_HBP_PAD_SZ);
                if (ret)
                        return ret;
                offset += PTRACE_HBP_PAD_SZ;
                idx++;
        }

        return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
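
/*
 * Illustrative userspace sketch (not part of the kernel build; the struct
 * field names below are an assumption based on the uapi user_hwdebug_state
 * layout implied by the offsetof() calls above): a native debugger could
 * read the breakpoint resource word and slot 0 with PTRACE_GETREGSET, e.g.
 *
 *      struct user_hwdebug_state dbg;
 *      struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov) == 0) {
 *              int slots = dbg.dbg_info & 0xff;    // low byte = slot count
 *              unsigned long long addr = dbg.dbg_regs[0].addr;
 *      }
 *
 * The same call with NT_ARM_HW_WATCH returns the watchpoint state.
 */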

static int gpr_get(struct task_struct *target,
                   const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_pt_regs newregs;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
                return ret;

        if (!valid_user_regs(&newregs))
                return -EINVAL;

        task_pt_regs(target)->user_regs = newregs;
        return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        uregs = &target->thread.fpsimd_state.user_fpsimd;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        struct user_fpsimd_state newstate;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
                return ret;

        target->thread.fpsimd_state.user_fpsimd = newstate;
        fpsimd_flush_task_state(target);
        return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        unsigned long *tls = &target->thread.tp_value;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        int ret;
        unsigned long tls;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
                return ret;

        target->thread.tp_value = tls;
        return ret;
}

enum aarch64_regset {
        REGSET_GPR,
        REGSET_FPR,
        REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        REGSET_HW_BREAK,
        REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
        [REGSET_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_pt_regs) / sizeof(u64),
                .size = sizeof(u64),
                .align = sizeof(u64),
                .get = gpr_get,
                .set = gpr_set
        },
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
                /*
                 * We pretend we have 32-bit registers because the fpsr and
                 * fpcr are 32-bits wide.
                 */
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = fpr_get,
                .set = fpr_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_ARM_TLS,
                .n = 1,
                .size = sizeof(void *),
                .align = sizeof(void *),
                .get = tls_get,
                .set = tls_set,
        },
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        [REGSET_HW_BREAK] = {
                .core_note_type = NT_ARM_HW_BREAK,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
        [REGSET_HW_WATCH] = {
                .core_note_type = NT_ARM_HW_WATCH,
                .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
                .size = sizeof(u32),
                .align = sizeof(u32),
                .get = hw_break_get,
                .set = hw_break_set,
        },
#endif
};

static const struct user_regset_view user_aarch64_view = {
        .name = "aarch64", .e_machine = EM_AARCH64,
        .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
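
/*
 * Illustrative userspace sketch (not part of the kernel build; assumes the
 * usual sys/ptrace.h and sys/uio.h declarations): the regset view above is
 * what a native debugger reaches through PTRACE_GETREGSET, e.g. reading the
 * general-purpose registers as a struct user_pt_regs:
 *
 *      struct user_pt_regs regs;
 *      struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *      if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *              printf("pc = 0x%llx\n", (unsigned long long)regs.pc);
 *
 * PTRACE_SETREGSET with the same arguments writes the registers back
 * through gpr_set(), which rejects invalid pstate values via
 * valid_user_regs().
 */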

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
        REGSET_COMPAT_GPR,
        REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                compat_ulong_t reg;

                switch (idx) {
                case 15:
                        reg = task_pt_regs(target)->pc;
                        break;
                case 16:
                        reg = task_pt_regs(target)->pstate;
                        break;
                case 17:
                        reg = task_pt_regs(target)->orig_x0;
                        break;
                default:
                        reg = task_pt_regs(target)->regs[idx];
                }

                ret = copy_to_user(ubuf, &reg, sizeof(reg));
                if (ret)
                        break;

                ubuf += sizeof(reg);
        }

        return ret;
}

static int compat_gpr_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct pt_regs newregs;
        int ret = 0;
        unsigned int i, start, num_regs;

        /* Calculate the number of AArch32 registers contained in count */
        num_regs = count / regset->size;

        /* Convert pos into a register number */
        start = pos / regset->size;

        if (start + num_regs > regset->n)
                return -EIO;

        newregs = *task_pt_regs(target);

        for (i = 0; i < num_regs; ++i) {
                unsigned int idx = start + i;
                compat_ulong_t reg;

                ret = copy_from_user(&reg, ubuf, sizeof(reg));
                if (ret)
                        return ret;

                ubuf += sizeof(reg);

                switch (idx) {
                case 15:
                        newregs.pc = reg;
                        break;
                case 16:
                        newregs.pstate = reg;
                        break;
                case 17:
                        newregs.orig_x0 = reg;
                        break;
                default:
                        newregs.regs[idx] = reg;
                }

        }

        if (valid_user_regs(&newregs.user_regs))
                *task_pt_regs(target) = newregs;
        else
                ret = -EINVAL;

        return ret;
}

static int compat_vfp_get(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          void *kbuf, void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        /*
         * The VFP registers are packed into the fpsimd_state, so they all sit
         * nicely together for us. We just need to create the fpscr separately.
         */
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                  VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
                        (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
                ret = put_user(fpscr, (compat_ulong_t *)ubuf);
        }

        return ret;
}

static int compat_vfp_set(struct task_struct *target,
                          const struct user_regset *regset,
                          unsigned int pos, unsigned int count,
                          const void *kbuf, const void __user *ubuf)
{
        struct user_fpsimd_state *uregs;
        compat_ulong_t fpscr;
        int ret;

        if (pos + count > VFP_STATE_SIZE)
                return -EIO;

        uregs = &target->thread.fpsimd_state.user_fpsimd;

        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
                                 VFP_STATE_SIZE - sizeof(compat_ulong_t));

        if (count && !ret) {
                ret = get_user(fpscr, (compat_ulong_t *)ubuf);
                uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
                uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
        }

        fpsimd_flush_task_state(target);
        return ret;
}

static const struct user_regset aarch32_regsets[] = {
        [REGSET_COMPAT_GPR] = {
                .core_note_type = NT_PRSTATUS,
                .n = COMPAT_ELF_NGREG,
                .size = sizeof(compat_elf_greg_t),
                .align = sizeof(compat_elf_greg_t),
                .get = compat_gpr_get,
                .set = compat_gpr_set
        },
        [REGSET_COMPAT_VFP] = {
                .core_note_type = NT_ARM_VFP,
                .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
                .size = sizeof(compat_ulong_t),
                .align = sizeof(compat_ulong_t),
                .get = compat_vfp_get,
                .set = compat_vfp_set
        },
};

static const struct user_regset_view user_aarch32_view = {
        .name = "aarch32", .e_machine = EM_ARM,
        .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
                                   compat_ulong_t __user *ret)
{
        compat_ulong_t tmp;

        if (off & 3)
                return -EIO;

        if (off == COMPAT_PT_TEXT_ADDR)
                tmp = tsk->mm->start_code;
        else if (off == COMPAT_PT_DATA_ADDR)
                tmp = tsk->mm->start_data;
        else if (off == COMPAT_PT_TEXT_END_ADDR)
                tmp = tsk->mm->end_code;
        else if (off < sizeof(compat_elf_gregset_t))
                return copy_regset_to_user(tsk, &user_aarch32_view,
                                           REGSET_COMPAT_GPR, off,
                                           sizeof(compat_ulong_t), ret);
        else if (off >= COMPAT_USER_SZ)
                return -EIO;
        else
                tmp = 0;

        return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
{
        int ret;

        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;

        if (off >= sizeof(compat_elf_gregset_t))
                return 0;

        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
        return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
        return (abs(num) - 1) >> 1;
}
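
/*
 * For example (derived from the numbering scheme above):
 *
 *      num  1 -> breakpoint 0 address      num -1 -> watchpoint 0 address
 *      num  2 -> breakpoint 0 control      num -2 -> watchpoint 0 control
 *      num  3 -> breakpoint 1 address      num -3 -> watchpoint 1 address
 *      num  4 -> breakpoint 1 control      num -4 -> watchpoint 1 control
 *
 * i.e. odd |num| selects the address register and even |num| the control
 * register of slot (|num| - 1) / 2.
 */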

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
        u8 num_brps, num_wrps, debug_arch, wp_len;
        u32 reg = 0;

        num_brps = hw_breakpoint_slots(TYPE_INST);
        num_wrps = hw_breakpoint_slots(TYPE_DATA);

        debug_arch = debug_monitors_arch();
        wp_len = 8;
        reg |= debug_arch;
        reg <<= 8;
        reg |= wp_len;
        reg <<= 8;
        reg |= num_wrps;
        reg <<= 8;
        reg |= num_brps;

        *kdata = reg;
        return 0;
}
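
/*
 * The packed word built above ends up laid out as:
 *
 *      bits [31:24]    debug architecture version
 *      bits [23:16]    maximum watchpoint length (8 bytes)
 *      bits [15: 8]    number of watchpoint slots
 *      bits [ 7: 0]    number of breakpoint slots
 *
 * which is what an AArch32 debugger reads back through virtual register 0.
 */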

static int compat_ptrace_hbp_get(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr = 0;
        u32 ctrl = 0;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
                *kdata = (u32)addr;
        } else {
                err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
                *kdata = ctrl;
        }

        return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
                                 struct task_struct *tsk,
                                 compat_long_t num,
                                 u32 *kdata)
{
        u64 addr;
        u32 ctrl;

        int err, idx = compat_ptrace_hbp_num_to_idx(num);

        if (num & 1) {
                addr = *kdata;
                err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
        } else {
                ctrl = *kdata;
                err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
        }

        return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
        /* Resource info */
        } else if (num == 0) {
                ret = compat_ptrace_hbp_get_resource_info(&kdata);
        /* Breakpoint */
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }
        set_fs(old_fs);

        if (!ret)
                ret = put_user(kdata, data);

        return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
                                    compat_ulong_t __user *data)
{
        int ret;
        u32 kdata = 0;
        mm_segment_t old_fs = get_fs();

        if (num == 0)
                return 0;

        ret = get_user(kdata, data);
        if (ret)
                return ret;

        set_fs(KERNEL_DS);
        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
        set_fs(old_fs);

        return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
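
/*
 * Illustrative sketch for a 32-bit (AArch32) tracer (not part of the kernel
 * build; PTRACE_GETHBPREGS here refers to the arch/arm request that maps
 * onto COMPAT_PTRACE_GETHBPREGS above - treat that spelling as an
 * assumption of this example): reading the resource word and the address
 * of breakpoint slot 0 could look like
 *
 *      unsigned long info, addr;
 *
 *      ptrace(PTRACE_GETHBPREGS, pid, (void *)0, &info);
 *      ptrace(PTRACE_GETHBPREGS, pid, (void *)1, &addr);
 */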

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = compat_ptrace_read_user(child, addr, datap);
                break;

        case PTRACE_POKEUSR:
                ret = compat_ptrace_write_user(child, addr, data);
                break;

        case COMPAT_PTRACE_GETREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_GPR,
                                          0, sizeof(compat_elf_gregset_t),
                                          datap);
                break;

        case COMPAT_PTRACE_SETREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_GPR,
                                            0, sizeof(compat_elf_gregset_t),
                                            datap);
                break;

        case COMPAT_PTRACE_GET_THREAD_AREA:
                ret = put_user((compat_ulong_t)child->thread.tp_value,
                               (compat_ulong_t __user *)datap);
                break;

        case COMPAT_PTRACE_SET_SYSCALL:
                task_pt_regs(child)->syscallno = data;
                ret = 0;
                break;

        case COMPAT_PTRACE_GETVFPREGS:
                ret = copy_regset_to_user(child,
                                          &user_aarch32_view,
                                          REGSET_COMPAT_VFP,
                                          0, VFP_STATE_SIZE,
                                          datap);
                break;

        case COMPAT_PTRACE_SETVFPREGS:
                ret = copy_regset_from_user(child,
                                            &user_aarch32_view,
                                            REGSET_COMPAT_VFP,
                                            0, VFP_STATE_SIZE,
                                            datap);
                break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        case COMPAT_PTRACE_GETHBPREGS:
                ret = compat_ptrace_gethbpregs(child, addr, datap);
                break;

        case COMPAT_PTRACE_SETHBPREGS:
                ret = compat_ptrace_sethbpregs(child, addr, datap);
                break;
#endif

        default:
                ret = compat_ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
        if (is_compat_thread(task_thread_info(task)))
                return &user_aarch32_view;
#endif
        return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
        PTRACE_SYSCALL_ENTER = 0,
        PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
                                     enum ptrace_syscall_dir dir)
{
        int regno;
        unsigned long saved_reg;

        /*
         * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
         * used to denote syscall entry/exit:
         */
        regno = (is_compat_task() ? 12 : 7);
        saved_reg = regs->regs[regno];
        regs->regs[regno] = dir;

        if (dir == PTRACE_SYSCALL_EXIT)
                tracehook_report_syscall_exit(regs, 0);
        else if (tracehook_report_syscall_entry(regs))
                regs->syscallno = ~0UL;

        regs->regs[regno] = saved_reg;
}
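
/*
 * As a consequence of the scratch-register convention above, a tracer
 * stopped by PTRACE_SYSCALL can tell entry from exit by inspecting that
 * register in the stopped task (x7 for native tracees, r12/ip for compat
 * ones): it reads PTRACE_SYSCALL_ENTER (0) at syscall entry and
 * PTRACE_SYSCALL_EXIT (1) at syscall exit. The original register value is
 * restored before the task resumes.
 */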

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);

        return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));

        if (test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}