// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/mte.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
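
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * a consumer such as the kprobes fetch-arg code can combine the lookup
 * above with the generic accessor from <asm/ptrace.h> to read a named
 * register out of a trapped context, e.g.:
 *
 *	int off = regs_query_register_offset("x2");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);	// value of x2
 *
 * "x2" is just an example name from regoffset_table above.
 */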

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @addr: address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, sizeof(unsigned long), NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno, bkpt->trigger,
						  desc);
		return;
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT, bkpt->trigger, desc);
}
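
/*
 * Editor's note (illustrative): the compat si_errno encoding above
 * reports breakpoints as positive slot numbers and watchpoints as
 * negative ones, slot i mapping to +/-((i << 1) + 1). For example:
 *
 *	breakpoint slot 0 -> si_errno = 1
 *	breakpoint slot 1 -> si_errno = 3
 *	watchpoint slot 0 -> si_errno = -1
 *
 * This matches the virtual register-pair numbering decoded by
 * compat_ptrace_hbp_num_to_idx() later in this file.
 */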

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
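
/*
 * Editor's note (illustrative): ptrace_hbp_get_resource_info() packs
 * the word read back from register 0 of the NT_ARM_HW_BREAK /
 * NT_ARM_HW_WATCH regsets as:
 *
 *	bits [15:8]	debug architecture (debug_monitors_arch())
 *	bits [7:0]	number of available slots
 *
 * so, for example, 6 breakpoint slots on debug architecture 0x8 would
 * read back as 0x0806 (assumed values, for illustration only).
 */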

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));
	membuf_zero(&to, sizeof(u32));
	/* (address, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		membuf_store(&to, addr);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}
	return 0;
}
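
/*
 * Editor's note (illustrative): hw_break_get() above emits the layout
 * of struct user_hwdebug_state: a u32 resource-info word, a u32 pad,
 * then one (u64 addr, u32 ctrl, u32 pad) triple per slot, matching the
 * PTRACE_HBP_*_SZ sizes defined above. hw_break_set() below parses the
 * same layout back, stopping early when the writer supplies a short
 * buffer.
 */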

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

static int fpr_active(struct task_struct *target, const struct user_regset *regset)
{
	if (!system_supports_fpsimd())
		return -ENODEV;
	return regset->n;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return membuf_write(&to, uregs, sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!system_supports_fpsimd())
		return -EINVAL;

	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, to);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!system_supports_fpsimd())
		return -EINVAL;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (target == current)
		tls_preserve_current_state();

	return membuf_store(&to, target->thread.uw.tp_value);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	return membuf_store(&to, task_pt_regs(target)->syscallno);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = task_get_sve_vl(target);
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl();
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}
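
/*
 * Editor's note (illustrative): vl is the vector length in bytes and vq
 * the number of 128-bit quadwords per vector, so vq = vl / 16
 * (SVE_VQ_BYTES == 16). For example, a task running with 256-bit
 * vectors has header.vl == 32 and vq == 2, and header.size then covers
 * the header plus the register payload computed by
 * SVE_PT_SIZE(2, header.flags).
 */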

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	membuf_write(&to, &header, sizeof(header));

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, to);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	membuf_write(&to, target->thread.sve_state, end - start);

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	membuf_zero(&to, end - start);

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);

	start = end;
	end = sve_size_from_header(&header);
	return membuf_zero(&to, end - start);
}
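
/*
 * Editor's note (illustrative): the NT_ARM_SVE dump produced above is,
 * in order: struct user_sve_header; then either the FPSIMD view
 * (struct user_fpsimd_state, when SVE_PT_REGS_FPSIMD) or the full SVE
 * view (Z0-Z31, P0-P15 and FFR packed for the current vq); zero padding
 * up to the fpsr offset; fpsr and fpcr; and trailing zero padding up to
 * sve_size_from_header(). sve_set() below accepts the same layout.
 */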

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * vec_set_vector_length(), which will also validate them for us:
	 */
	ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(task_get_sve_vl(target));

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);
	if (!target->thread.sve_state) {
		ret = -ENOMEM;
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return membuf_write(&to, &uregs, sizeof(uregs));
}

static int pac_enabled_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}

static int pac_enabled_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long enabled_keys = ptrauth_get_enabled_keys(target);

	if (IS_ERR_VALUE(enabled_keys))
		return enabled_keys;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
				 sizeof(long));
	if (ret)
		return ret;

	return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
					enabled_keys);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}
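
/*
 * Editor's note (illustrative): the two helpers above give a lossless
 * round trip between the kernel's {lo, hi} halves and the __uint128_t
 * user view. E.g. a key with hi == 0x0123456789abcdef and
 * lo == 0xfedcba9876543210 is exposed to userspace as the single
 * 128-bit value 0x0123456789abcdeffedcba9876543210.
 */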

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys_user *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys_user *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return membuf_write(&to, &user_keys, sizeof(user_keys));
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys_user *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
static int tagged_addr_ctrl_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	long ctrl = get_tagged_addr_ctrl(target);

	if (IS_ERR_VALUE(ctrl))
		return ctrl;

	return membuf_write(&to, &ctrl, sizeof(ctrl));
}

static int tagged_addr_ctrl_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int ret;
	long ctrl;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
	if (ret)
		return ret;

	return set_tagged_addr_ctrl(target, ctrl);
}
#endif
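
/*
 * Editor's sketch (userspace side, illustrative only): each regset
 * below is reached via PTRACE_GETREGSET/PTRACE_SETREGSET with the
 * matching NT_* note type, e.g.:
 *
 *	struct user_pac_mask masks;
 *	struct iovec iov = {
 *		.iov_base = &masks,
 *		.iov_len = sizeof(masks),
 *	};
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_MASK, &iov);
 *
 * On success the core ptrace code writes back the amount of data
 * actually transferred in iov.iov_len.
 */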

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
	REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	REGSET_TAGGED_ADDR_CTRL,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.active = fpr_active,
		.regset_get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.regset_get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.regset_get = sve_get,
		.set = sve_set,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.regset_get = pac_mask_get,
		/* this cannot be set dynamically */
	},
	[REGSET_PAC_ENABLED_KEYS] = {
		.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = pac_enabled_keys_get,
		.set = pac_enabled_keys_set,
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.regset_get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
	[REGSET_TAGGED_ADDR_CTRL] = {
		.core_note_type = NT_ARM_TAGGED_ADDR_CTRL,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = tagged_addr_ctrl_get,
		.set = tagged_addr_ctrl_set,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static inline compat_ulong_t compat_get_user_reg(struct task_struct *task, int idx)
{
	struct pt_regs *regs = task_pt_regs(task);

	switch (idx) {
	case 15:
		return regs->pc;
	case 16:
		return pstate_to_compat_psr(regs->pstate);
	case 17:
		return regs->orig_x0;
	default:
		return regs->regs[idx];
	}
}

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	int i = 0;

	while (to.left)
		membuf_store(&to, compat_get_user_reg(target, i++));
	return 0;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}
	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
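
/*
 * Editor's note (illustrative): in the AArch32 view, "registers" 0-14
 * are r0-r14, and three extra indices are synthesized from pt_regs:
 * index 15 is the pc, index 16 the CPSR (converted to and from the
 * native pstate encoding), and index 17 orig_x0 (ARM's orig_r0). The
 * same mapping is used by compat_get_user_reg(), compat_gpr_set() and
 * compat_ptrace_write_user().
 */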

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	membuf_write(&to, uregs, VFP_STATE_SIZE - sizeof(compat_ulong_t));
	fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
		(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
	return membuf_store(&to, fpscr);
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	if (!system_supports_fpsimd())
		return -EINVAL;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	return membuf_store(&to, (compat_ulong_t)target->thread.uw.tp_value);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.active = fpr_active,
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.regset_get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.regset_get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.regset_get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		tmp = compat_get_user_reg(tsk, off >> 2);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	struct pt_regs newregs = *task_pt_regs(tsk);
	unsigned int idx = off / 4;

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	switch (idx) {
	case 15:
		newregs.pc = val;
		break;
	case 16:
		newregs.pstate = compat_psr_to_pstate(val);
		break;
	case 17:
		newregs.orig_x0 = val;
		break;
	default:
		newregs.regs[idx] = val;
	}

	if (!valid_user_regs(&newregs.user_regs, tsk))
		return -EINVAL;

	*task_pt_regs(tsk) = newregs;
	return 0;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
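
/*
 * Editor's note (illustrative): with this numbering, num = +1/+2 are
 * breakpoint 0's address/control registers, +3/+4 breakpoint 1's, and
 * num = -1/-2 watchpoint 0's address/control. Odd numbers select
 * address registers and even numbers control registers, with
 * compat_ptrace_hbp_num_to_idx() recovering the slot index.
 */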

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
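
/*
 * Editor's note (illustrative): a 32-bit debugger drives the helpers
 * above with PTRACE_GETHBPREGS/PTRACE_SETHBPREGS, passing the virtual
 * register number in place of the usual address argument - e.g.
 * reading register 0 for the packed resource word, or register -1 for
 * watchpoint 0's address. Writes to register 0 are silently ignored.
 */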

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKMTETAGS:
	case PTRACE_POKEMTETAGS:
		return mte_ptrace_copy_tags(child, request, addr, data);
	}

	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * We have some ABI weirdness here in the way that we handle syscall
	 * exit stops because we indicate whether or not the stop has been
	 * signalled from syscall entry or syscall exit by clobbering a general
	 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
	 * and restoring its old value after the stop. This means that:
	 *
	 * - Any writes by the tracer to this register during the stop are
	 *   ignored/discarded.
	 *
	 * - The actual value of the register is not available during the stop,
	 *   so the tracer cannot save it and restore it later.
	 *
	 * - Syscall stops behave differently to seccomp and pseudo-step traps
	 *   (the latter do not nobble any registers).
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_ENTER) {
		if (ptrace_report_syscall_entry(regs))
			forget_syscall(regs);
		regs->regs[regno] = saved_reg;
	} else if (!test_thread_flag(TIF_SINGLESTEP)) {
		ptrace_report_syscall_exit(regs, 0);
		regs->regs[regno] = saved_reg;
	} else {
		regs->regs[regno] = saved_reg;

		/*
		 * Signal a pseudo-step exception since we are stepping but
		 * tracer modifications to the registers may have rewound the
		 * state machine.
		 */
		ptrace_report_syscall_exit(regs, 1);
	}
}
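
/*
 * Editor's note (illustrative): a tracer using PTRACE_SYSCALL therefore
 * reads the scratch register (x7 for AArch64 tracees, r12 for AArch32)
 * as 0 in a syscall-entry stop and 1 in a syscall-exit stop, and any
 * value it writes to that register during the stop is discarded when
 * the tracee resumes.
 */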

int syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
		report_syscall(regs, PTRACE_SYSCALL_ENTER);
		if (flags & _TIF_SYSCALL_EMU)
			return NO_SYSCALL;
	}

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing() == -1)
		return NO_SYSCALL;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	unsigned long flags = read_thread_flags();

	audit_syscall_exit(regs);

	if (flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
		report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}
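
/*
 * Editor's note: the exit path above deliberately mirrors the entry
 * path in reverse - entry runs ptrace, then seccomp, then the
 * tracepoint, then audit, while exit runs audit first, then the
 * tracepoint, then ptrace, with rseq_syscall() last (a no-op unless
 * CONFIG_DEBUG_RSEQ is enabled).
 */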

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
	user_regs_reset_single_step(regs, task);

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}