/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo = SIGTRAP,
		.si_errno = 0,
		.si_code = TRAP_HWBKPT,
		.si_addr = (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}
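
/*
 * For example, with the encoding above a compat task whose breakpoint
 * slot i fires sees si_errno == (i << 1) + 1, and watchpoint slot i
 * reports si_errno == -((i << 1) + 1): breakpoint slot 0 gives 1,
 * slot 1 gives 3, watchpoint slot 0 gives -1.  A 32-bit debugger can
 * therefore recover the slot as (abs(si_errno) - 1) >> 1 and use the
 * sign to tell breakpoints from watchpoints, mirroring
 * compat_ptrace_hbp_num_to_idx() further down in this file.
 */
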
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
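
/*
 * Note on the flow above: the perf event is created disabled and with
 * placeholder address/length, so nothing is programmed into the hardware
 * debug registers until the tracer supplies a real address and an enabled
 * control value through the NT_ARM_HW_BREAK/NT_ARM_HW_WATCH regsets; see
 * ptrace_hbp_set_addr() and ptrace_hbp_set_ctrl() below, which update the
 * event in place via modify_user_hw_breakpoint().
 */
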
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
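
/*
 * The resulting dbg_info word packs the debug architecture version in
 * bits [15:8] and the number of available slots in bits [7:0].  For
 * example (values illustrative only), a part reporting debug architecture
 * 0x6 with four instruction breakpoints would return 0x0604 for
 * NT_ARM_HW_BREAK.
 */
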
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? bp->attr.bp_addr : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
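
/*
 * Layout produced by hw_break_get() above and consumed by hw_break_set()
 * below, following the uapi struct user_hwdebug_state: a 32-bit dbg_info
 * word, a 32-bit pad, then one { u64 addr; u32 ctrl; u32 pad; } entry per
 * hardware slot.  A partial read covering only dbg_info is enough for a
 * debugger to discover how many slots exist before touching any of them.
 */
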
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}
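
/*
 * The NT_ARM_SYSTEM_CALL regset implemented below exposes
 * pt_regs->syscallno as a single int, so a tracer stopped at a syscall
 * can read the pending system call number with PTRACE_GETREGSET and, if
 * it wants the call suppressed or redirected, rewrite it with
 * PTRACE_SETREGSET (the kernel itself uses ~0 / -1 as the "no syscall"
 * marker, as in tracehook_report_syscall() at the end of this file).
 */
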
static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno, ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
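
/*
 * Each entry above is addressed from userspace through the generic regset
 * ptrace calls, keyed by the ELF note type.  A rough userspace sketch
 * (error handling omitted) for reading the general-purpose registers of a
 * stopped tracee would be:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * The same pattern with NT_ARM_HW_BREAK/NT_ARM_HW_WATCH and
 * struct user_hwdebug_state drives hw_break_get()/hw_break_set().
 */
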
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
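
/*
 * Mapping used by compat_gpr_get()/compat_gpr_set() above: AArch32 r0-r14
 * live in regs[0..14] of the AArch64 pt_regs, virtual register 15 is the
 * pc, 16 is the pstate (the AArch32 CPSR view) and 17 is orig_x0, which
 * holds the original first syscall argument (ORIG_r0 in the arm regset
 * layout).
 */
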
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
				  VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
		ret = put_user(fpscr, (compat_ulong_t *)ubuf);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret;

	if (pos + count > VFP_STATE_SIZE)
		return -EIO;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 VFP_STATE_SIZE - sizeof(compat_ulong_t));

	if (count && !ret) {
		ret = get_user(fpscr, (compat_ulong_t *)ubuf);
		uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
		uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
	}

	fpsimd_flush_task_state(target);
	return ret;
}
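
/*
 * On AArch64 the single AArch32 FPSCR is split across two registers:
 * FPSR carries the status bits (masked by VFP_FPSCR_STAT_MASK) and FPCR
 * carries the control bits (masked by VFP_FPSCR_CTRL_MASK).  The two
 * helpers above simply recombine or re-split that word at the end of the
 * VFP regset so that 32-bit debuggers keep seeing a single FPSCR.
 */
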
static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
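
/*
 * For example: virtual registers 1 and 2 are the (address, control) pair
 * for breakpoint slot 0, registers 3 and 4 the pair for slot 1, while
 * registers -1 and -2 address watchpoint slot 0.  In each case
 * (abs(num) - 1) >> 1 yields the slot index, and the low bit of abs(num)
 * distinguishes the address (odd) from the control word (even).
 */
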
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
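
/*
 * Rough usage sketch from a 32-bit tracer (illustrative only): reading
 * virtual register 0 via COMPAT_PTRACE_GETHBPREGS returns the packed
 * (debug_arch, wp_len, num_wrps, num_brps) word built above, and a
 * watchpoint on slot 0 is then programmed with two
 * COMPAT_PTRACE_SETHBPREGS calls, one with num == -1 carrying the
 * address and one with num == -2 carrying the encoded control word.
 */
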
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;

	regs->regs[regno] = saved_reg;
}
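
/*
 * In other words, while the tracee sits in a syscall-trace stop the tracer
 * can read x7 (or r12/ip for a compat task) to tell a syscall-entry stop
 * (value 0, PTRACE_SYSCALL_ENTER) from a syscall-exit stop (value 1,
 * PTRACE_SYSCALL_EXIT); the register's original contents are restored
 * before the tracee resumes.
 */
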
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	/* Do the secure computing check first; failures should be fast. */
	if (secure_computing() == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}