/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}
	for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}
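/*
 * Worked example of the si_errno encoding above (illustrative, compat
 * tasks only): hitting breakpoint slot 1 is reported as
 * si_errno = (1 << 1) + 1 = 3, while the corresponding watchpoint slot
 * is encoded as -3. Native (64-bit) tasks always see si_errno == 0.
 */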
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
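/*
 * Note on the lifecycle above (descriptive): breakpoint events are
 * created lazily and registered disabled, with defaults chosen to pass
 * the perf hw_breakpoint validation. A tracer subsequently programs a
 * slot by writing an address and a control word; the control value is
 * decoded and applied via ptrace_hbp_set_ctrl() ->
 * ptrace_hbp_fill_attr_ctrl() -> modify_user_hw_breakpoint(), and a
 * control word with the enable bit clear leaves the event disabled.
 */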
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}
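/*
 * The resource-info word built in ptrace_hbp_get_resource_info() packs
 * the debug architecture ID into bits [15:8] and the slot count into
 * bits [7:0]; e.g. with illustrative values of architecture 0x6 and six
 * instruction slots, the word reads back as 0x0606. Slots that have not
 * been programmed read back as a zero address and a zero control word.
 */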
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							 struct task_struct *tsk,
							 unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
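/*
 * Typical userspace access to this regset (illustrative sketch; the
 * dbg_regs member names are taken from the uapi user_hwdebug_state
 * definition and are not defined in this file):
 *
 *	struct user_hwdebug_state hws;
 *	struct iovec iov = { .iov_base = &hws, .iov_len = sizeof(hws) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_HW_BREAK, &iov);
 *	// edit hws.dbg_regs[n].addr and hws.dbg_regs[n].ctrl, then:
 *	ptrace(PTRACE_SETREGSET, pid, (void *)NT_ARM_HW_BREAK, &iov);
 *
 * The layout is the resource-info word, a pad word, then one
 * (u64 address, u32 ctrl, u32 pad) triple per slot, matching the
 * copyout sequence in hw_break_get() above and the copyin sequence in
 * hw_break_set() below.
 */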
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}
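/*
 * tls_get()/tls_set() above operate on thread.tp_value, the task's saved
 * user TLS pointer (TPIDR_EL0 on AArch64); it is exposed to debuggers as
 * the single-entry NT_ARM_TLS regset defined below.
 */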
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret)
				break;

			ubuf += sizeof(reg);
		}
	}

	return ret;
}
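/*
 * AArch32 register indices used by the compat GPR accessors map onto the
 * AArch64 pt_regs as follows: indices 0-14 are regs[0..14], 15 is the PC,
 * 16 is the PSTATE (CPSR) value and 17 is orig_x0 (ORIG_r0). For example,
 * a 4-byte read at index 15 returns the tracee's program counter.
 */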
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret)
				return ret;

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
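/*
 * For compat PTRACE_PEEKUSR/PTRACE_POKEUSR (handled here and in
 * compat_ptrace_write_user() below), offsets below
 * sizeof(compat_elf_gregset_t) address the AArch32 GPR area in 4-byte
 * units (e.g. offset 15 * 4 == 60 reads the PC), the COMPAT_PT_*_ADDR
 * offsets report the code/data layout from the mm, and any other offset
 * below COMPAT_USER_SZ reads as zero and is ignored on write.
 */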
static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
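/*
 * Worked example of the COMPAT_PTRACE_GETHBPREGS/SETHBPREGS register
 * numbering handled above (illustrative): register 0 returns the packed
 * resource-info word (debug_arch, wp_len, num_wrps, num_brps, one byte
 * each, high to low); register 1 is the address of breakpoint slot 0 and
 * register 2 its control word (idx = (abs(num) - 1) >> 1 == 0);
 * registers -1 and -2 are the address and control word of watchpoint
 * slot 0.
 */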
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
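/*
 * The helpers below report syscall entry/exit to a tracer. A scratch
 * register is temporarily loaded with the direction value
 * (PTRACE_SYSCALL_ENTER or PTRACE_SYSCALL_EXIT) and restored afterwards;
 * if the tracer rejects entry, the syscall number is replaced with ~0UL,
 * on the assumption that the syscall entry path treats an out-of-range
 * number as invalid and skips the original call.
 */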
static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}