/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
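
/*
 * Usage sketch (illustrative, no caller in this file): a client such as
 * a kprobes fetch method can resolve a register by name and then read it
 * with regs_get_register() from <asm/ptrace.h>:
 *
 *	int offset = regs_query_register_offset("x2");
 *
 *	if (offset >= 0)
 *		val = regs_get_register(regs, offset);
 */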

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel
 * stack page(s). If @addr is within the kernel stack, it returns true.
 * If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}
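
/*
 * Worked example of the si_errno encoding above: breakpoint slot 0
 * reports si_errno 1, breakpoint slot 1 reports 3, and watchpoint slot 1
 * reports -3. This matches the register numbering used by
 * COMPAT_PTRACE_GETHBPREGS/SETHBPREGS further down.
 */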

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
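
/*
 * Note that events are created lazily and start out disabled: the first
 * regset write that touches a free slot reaches this function through
 * ptrace_hbp_get_initialised_bp(), and the event only becomes live once
 * ptrace_hbp_set_ctrl() installs a ctrl value with the enable bit set.
 */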

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
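
/*
 * For example, on a CPU reporting the ARMv8 debug architecture (0x6)
 * with six breakpoint slots, the NT_ARM_HW_BREAK resource info word
 * reads back as (0x6 << 8) | 6 = 0x0606.
 */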

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)
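
/*
 * hw_break_get()/hw_break_set() below marshal the uapi struct
 * user_hwdebug_state, which is laid out as:
 *
 *	__u32 dbg_info;		resource info, encoded as above
 *	__u32 pad;
 *	struct {
 *		__u64 addr;
 *		__u32 ctrl;
 *		__u32 pad;
 *	} dbg_regs[16];
 */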

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}
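
/*
 * Layout sketch for NT_ARM_SVE (illustrative): userspace sees struct
 * user_sve_header at offset 0, then, depending on
 * (header.flags & SVE_PT_REGS_MASK), either a struct user_fpsimd_state
 * at SVE_PT_FPSIMD_OFFSET or the Z/P/FFR registers and FPSR/FPCR at the
 * SVE_PT_SVE_*_OFFSET(vq) positions, where vq = sve_vq_from_vl(header.vl).
 */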

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
	return (__uint128_t)key->hi << 64 | key->lo;
}

static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
{
	struct ptrauth_key key = {
		.lo = (unsigned long)ukey,
		.hi = (unsigned long)(ukey >> 64),
	};

	return key;
}

static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
				     const struct ptrauth_keys *keys)
{
	ukeys->apiakey = pac_key_to_user(&keys->apia);
	ukeys->apibkey = pac_key_to_user(&keys->apib);
	ukeys->apdakey = pac_key_to_user(&keys->apda);
	ukeys->apdbkey = pac_key_to_user(&keys->apdb);
}

static void pac_address_keys_from_user(struct ptrauth_keys *keys,
				       const struct user_pac_address_keys *ukeys)
{
	keys->apia = pac_key_from_user(ukeys->apiakey);
	keys->apib = pac_key_from_user(ukeys->apibkey);
	keys->apda = pac_key_from_user(ukeys->apdakey);
	keys->apdb = pac_key_from_user(ukeys->apdbkey);
}

static int pac_address_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_address_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys *keys = &target->thread.keys_user;
	struct user_pac_address_keys user_keys;
	int ret;

	if (!system_supports_address_auth())
		return -EINVAL;

	pac_address_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_address_keys_from_user(keys, &user_keys);

	return 0;
}

static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
				     const struct ptrauth_keys *keys)
{
	ukeys->apgakey = pac_key_to_user(&keys->apga);
}

static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
				       const struct user_pac_generic_keys *ukeys)
{
	keys->apga = pac_key_from_user(ukeys->apgakey);
}

static int pac_generic_keys_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct ptrauth_keys *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &user_keys, 0, -1);
}

static int pac_generic_keys_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	struct ptrauth_keys *keys = &target->thread.keys_user;
	struct user_pac_generic_keys user_keys;
	int ret;

	if (!system_supports_generic_auth())
		return -EINVAL;

	pac_generic_keys_to_user(&user_keys, keys);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &user_keys, 0, -1);
	if (ret)
		return ret;
	pac_generic_keys_from_user(keys, &user_keys);

	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_ARM64_PTR_AUTH */
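
/*
 * Example of the user-visible key format used above: pac_key_to_user()
 * packs { .lo = 1, .hi = 2 } as the __uint128_t value
 * ((__uint128_t)2 << 64) | 1, and pac_key_from_user() performs the
 * inverse split.
 */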

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
#ifdef CONFIG_CHECKPOINT_RESTORE
	REGSET_PACA_KEYS,
	REGSET_PACG_KEYS,
#endif
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = pac_mask_get,
		/* this cannot be set dynamically */
	},
#ifdef CONFIG_CHECKPOINT_RESTORE
	[REGSET_PACA_KEYS] = {
		.core_note_type = NT_ARM_PACA_KEYS,
		.n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_address_keys_get,
		.set = pac_address_keys_set,
	},
	[REGSET_PACG_KEYS] = {
		.core_note_type = NT_ARM_PACG_KEYS,
		.n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
		.size = sizeof(__uint128_t),
		.align = sizeof(__uint128_t),
		.get = pac_generic_keys_get,
		.set = pac_generic_keys_set,
	},
#endif
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
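
/*
 * Userspace view (illustrative): the regsets above back the generic
 * PTRACE_GETREGSET/PTRACE_SETREGSET requests, e.g. to read and modify
 * x0 of a stopped tracee:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *	regs.regs[0] = 0;
 *	ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &iov);
 */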

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
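
/*
 * In the AArch32 regset, registers 0-14 map directly to regs[0]-regs[14];
 * pseudo-register 15 is the pc, 16 the CPSR (converted with
 * pstate_to_compat_psr()/compat_psr_to_pstate()) and 17 ORIG_r0 (orig_x0).
 */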

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}
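
/*
 * AArch32 has a single FPSCR register where AArch64 splits status (FPSR)
 * from control (FPCR), so the two compat accessors above merge and split
 * the register using VFP_FPSCR_STAT_MASK and VFP_FPSCR_CTRL_MASK.
 */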

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
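
/*
 * For example, registers 1 and 2 are breakpoint 0's address and control
 * (both map to idx 0), registers 3 and 4 describe breakpoint 1, and
 * registers -1 and -2 describe watchpoint 0.
 */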

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);

	debug_arch	= debug_monitors_arch();
	wp_len		= 8;
	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip (r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
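
/*
 * A tracer can therefore tell entry stops from exit stops by reading the
 * scratch register (e.g. with PTRACE_GETREGSET) while the tracee is
 * stopped: x7 (r12 for compat tasks) reads as PTRACE_SYSCALL_ENTER (0)
 * at syscall entry and PTRACE_SYSCALL_EXIT (1) at syscall exit.
 */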

int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}