/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
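/*
 * Editorial sketch (not part of the original file): together with
 * regoffset_table above, this is how a caller such as the kprobes fetch
 * code can go from a register name to its value:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = *(u64 *)((char *)regs + off);
 */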
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
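/*
 * Editorial example (not from the original source): with n == 0 this simply
 * reads the word at the kernel SP recorded in @regs:
 *
 *	unsigned long top = regs_get_kernel_stack_nth(regs, 0);
 *
 * An out-of-range @n falls off the stack page(s) and yields 0 rather than
 * faulting, courtesy of the regs_within_kernel_stack() check above.
 */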
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo	= SIGTRAP;
	info.si_errno	= 0;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
	}
#endif
	arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
}
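/*
 * Editorial note on the encoding above: for compat tasks, si_errno carries
 * the AArch32 virtual debug-register number that fired: breakpoint i maps
 * to (i << 1) + 1 (positive), watchpoint i to -((i << 1) + 1) (negative).
 * This matches the numbering decoded by compat_ptrace_hbp_num_to_idx()
 * later in this file.
 */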
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
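/*
 * Layout of the value assembled above (editorial sketch, derived from the
 * shifts): bits [15:8] hold the debug architecture version reported by
 * debug_monitors_arch(), bits [7:0] the number of breakpoint or watchpoint
 * slots.
 */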
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
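/*
 * Editorial note: hw_break_get() above and hw_break_set() below both walk
 * struct user_hwdebug_state, which starts with a u32 resource-info word and
 * a u32 of padding, followed by one { u64 addr; u32 ctrl; u32 pad; } slot
 * per hardware debug register (hence PTRACE_HBP_{ADDR,CTRL,PAD}_SZ).
 */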
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}
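/*
 * Editorial note: __fpr_get()/__fpr_set() take a start_pos so that the same
 * copy routines can serve both NT_PRFPREG (start_pos == 0) and the FPSIMD
 * view of NT_ARM_SVE (start_pos == SVE_PT_FPSIMD_OFFSET) used by sve_get()
 * and sve_set() below.
 */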
static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}
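/*
 * Editorial note: header.size and header.max_size are byte counts, and
 * sve_size_from_header() rounds the regset size up to a whole number of
 * SVE_VQ_BYTES units, matching the .size/.align of the SVE regset declared
 * in aarch64_regsets[] below.
 */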
static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}
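/*
 * Editorial summary of the NT_ARM_SVE dump layout produced above: the
 * user_sve_header, then either the FPSIMD registers (SVE_PT_REGS_FPSIMD)
 * or the packed Z/P/FFR registers from thread.sve_state, zero padding up
 * to the fpsr/fpcr pair, and finally zeroes up to sve_size_from_header().
 */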
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
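/*
 * Editorial note: in the two functions above, indices 0-14 map straight to
 * the AArch32 r0-r14 banked in regs[], while the trailing
 * compat_elf_gregset_t slots are synthesised: index 15 is the PC, 16 the
 * PSTATE (the AArch32 CPSR view) and 17 ORIG_r0 (orig_x0), mirroring
 * arch/arm's ELF core-dump layout.
 */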
static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}
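/*
 * Editorial note: AArch32 has a single FPSCR, which AArch64 splits into
 * FPSR (status) and FPCR (control). The two helpers above fuse and split
 * it with VFP_FPSCR_STAT_MASK/VFP_FPSCR_CTRL_MASK so that a 32-bit
 * debugger sees one register, as it would on arch/arm.
 */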
static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
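/*
 * Worked example (editorial): num = 1 is the address and num = 2 the
 * control register of breakpoint 0 (idx 0); num = 3/4 map to breakpoint 1;
 * num = -1/-2 are the address/control of watchpoint 0, and so on. Odd
 * numbers select addresses, even numbers controls (see the num & 1 tests
 * in compat_ptrace_hbp_get()/compat_ptrace_hbp_set() below).
 */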
static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}
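/*
 * Layout of the word built above (editorial sketch, derived from the
 * shifts): bits [31:24] hold the debug architecture, [23:16] the
 * watchpoint length (always 8 bytes here), [15:8] the number of
 * watchpoint slots and [7:0] the number of breakpoint slots.
 */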
static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
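/*
 * Editorial note: from the tracer's point of view, the scratch register
 * chosen above reads as PTRACE_SYSCALL_ENTER (0) at syscall entry and
 * PTRACE_SYSCALL_EXIT (1) at exit while the tracee is stopped; its real
 * value is restored before the tracee resumes.
 */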
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}