// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
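
/*
 * Example (hypothetical caller): a fetch helper can resolve a register
 * name such as "x0" and then read it back via regs_get_register(),
 * which is how kprobe event arguments are typically looked up:
 *
 *	int off = regs_query_register_offset("x0");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */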

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n'th entry of the kernel stack
 * specified by @regs. If the @n'th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}
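
/*
 * Note on the encoding above: for a compat tracee, si_errno carries the
 * "virtual" debug register number used by COMPAT_PTRACE_GETHBPREGS —
 * positive odd values ((i << 1) + 1) identify breakpoint slot i, while
 * the negated values identify watchpoint slots (see
 * compat_ptrace_hbp_num_to_idx() below).
 */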

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}
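
/*
 * Breakpoint perf events are created lazily: ptrace_hbp_get_initialised_bp()
 * below calls ptrace_hbp_create() the first time userspace touches a slot,
 * registering a disabled event with placeholder address/length that the
 * tracer then configures through the ctrl and addr writes.
 */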

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
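
		/*
		 * Skip the 32 bits of padding that keep the next address
		 * field 64-bit aligned in struct user_hwdebug_state.
		 */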
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}
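
/*
 * __fpr_set() is shared with the SVE code below: @start_pos is 0 for the
 * plain NT_PRFPREG regset and SVE_PT_FPSIMD_OFFSET when the FPSIMD view
 * is embedded in an NT_ARM_SVE write (see sve_set()).
 */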
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (target == current)
		tls_preserve_current_state();

	return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
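
/*
 * The NT_ARM_SYSTEM_CALL regset above lets a tracer rewrite the pending
 * syscall number at a syscall-entry stop (writing -1 asks for the call
 * to be skipped); it is the regset counterpart of the AArch32
 * COMPAT_PTRACE_SET_SYSCALL request handled in compat_arch_ptrace()
 * below.
 */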

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, to);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	membuf_write(&to, target->thread.sve_state, end - start);

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	membuf_zero(&to, end - start);

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);

	start = end;
	end = sve_size_from_header(&header);
	return membuf_zero(&to, end - start);
}
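
/*
 * Resulting NT_ARM_SVE layout in the SVE_PT_REGS_SVE case: the
 * user_sve_header, then Z0-Z31, P0-P15 and FFR at their
 * SVE_PT_SVE_*_OFFSET(vq) positions, then fpsr/fpcr, zero-padded up to
 * a multiple of SVE_VQ_BYTES (see sve_size_from_header() above).
 */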

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}
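
/*
 * In the AArch32 gregset view, indices 0-14 map straight onto r0-r14
 * (held in regs[0..14]), 15 is the pc, 16 the cpsr (converted from
 * pstate) and 17 orig_r0 (held in orig_x0); compat_gpr_set() below
 * applies the same mapping in reverse.
 */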

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}
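
/*
 * Two AArch32 views follow: aarch32_regsets is the view used for core
 * dumps and compat ptrace requests (NT_PRSTATUS and NT_ARM_VFP only),
 * while aarch32_ptrace_regsets additionally exposes the TLS, hardware
 * debug and syscall regsets to native PTRACE_GETREGSET callers.
 */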

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
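
/*
 * For example, an AArch32 tracer reading r2 issues PTRACE_PEEKUSR with
 * off == 8; compat_ptrace_read_user() above then returns
 * compat_get_user_reg(tsk, 8 >> 2), i.e. regs->regs[2].
 */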

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
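
/*
 * Worked example of the virtual numbering: num == 1 addresses the
 * address register of breakpoint slot 0 ((abs(1) - 1) >> 1 == 0, odd
 * => address), num == 2 its control register, num == -1 the address
 * register of watchpoint slot 0, and num == 0 the resource info word.
 */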

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
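	/*
	 * For example, an AArch64 tracee observed at a syscall-entry stop
	 * reports x7 == 0 (PTRACE_SYSCALL_ENTER) and x7 == 1 at a
	 * syscall-exit stop, regardless of what the register held before.
	 */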
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (tracehook_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		tracehook_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		tracehook_report_syscall_exit(regs, 1);
	}
}

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = READ_ONCE(current_thread_info()->flags);

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs_return_value(regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}