/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
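/*
 * Illustrative sketch (not part of the kernel ABI): a kprobes-style consumer
 * might resolve a register name once with regs_query_register_offset() and
 * then read the live value through regs_get_register().  The "x0" lookup and
 * the helper name are assumptions made for the example.
 */
static void __maybe_unused example_reg_lookup(struct pt_regs *regs)
{
	int offset = regs_query_register_offset("x0");

	if (offset >= 0)
		pr_debug("x0 = 0x%llx\n", regs_get_register(regs, offset));
}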
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)(bkpt->trigger);

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		force_sig_ptrace_errno_trap(si_errno,
					    (void __user *)bkpt->trigger);
	}
#endif
	arm64_force_sig_info(&info, "Hardware breakpoint trap (ptrace)", current);
}
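/*
 * Illustrative sketch (assumption, not kernel code): inverting the si_errno
 * encoding built above, as a 32-bit debugger would.  It mirrors
 * compat_ptrace_hbp_num_to_idx() further down in this file; the helper name
 * is hypothetical.
 */
static int __maybe_unused example_decode_si_errno(int si_errno, bool *is_watch)
{
	*is_watch = si_errno < 0;		/* watchpoints are negative */

	return (abs(si_errno) - 1) >> 1;	/* slot in the hbp arrays */
}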
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
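/*
 * Illustrative sketch (assumption, not kernel code): unpacking the resource
 * word built by ptrace_hbp_get_resource_info() above.  Bits [15:8] hold the
 * debug architecture and bits [7:0] the slot count; the helper name is
 * hypothetical.
 */
static void __maybe_unused example_decode_resource_info(u32 info,
							u8 *debug_arch,
							u8 *num_slots)
{
	*debug_arch = (info >> 8) & 0xff;
	*num_slots = info & 0xff;
}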
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
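#if 0	/* Illustrative userspace sketch, kept out of the kernel build. */
/*
 * Reading the hardware-breakpoint state of a stopped tracee through
 * PTRACE_GETREGSET; the layout is the (info, pad, dbg_regs[]) sequence that
 * hw_break_get() above copies out.  This is an assumption about typical
 * debugger usage, not part of this file.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static int read_hw_break_state(pid_t pid, struct user_hwdebug_state *st)
{
	struct iovec iov = { .iov_base = st, .iov_len = sizeof(*st) };

	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
}
#endif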
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}
static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}
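#if 0	/* Illustrative userspace sketch, kept out of the kernel build. */
/*
 * Rewriting a stopped tracee's syscall number through the regset handled by
 * system_call_set() above; an assumption about tracer usage, not part of
 * this file.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int set_syscall_nr(pid_t pid, int nr)
{
	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };

	return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
}
#endif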
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}
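#if 0	/* Illustrative userspace sketch, kept out of the kernel build. */
/*
 * A short NT_ARM_SVE read returns just the header that
 * sve_init_header_from_task() builds, letting a debugger size a second,
 * full read from header.size.  An assumption about tracer usage, not part
 * of this file.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static int read_sve_header(pid_t pid, struct user_sve_header *h)
{
	struct iovec iov = { .iov_base = h, .iov_len = sizeof(*h) };

	return ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov);
}
#endif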
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr, and fpcr which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_struct
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = compat_ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = compat_ptrace_write_user(child, addr, data);
			break;

		case COMPAT_PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_GPR,
						  0, sizeof(compat_elf_gregset_t),
						  datap);
			break;

		case COMPAT_PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_GPR,
						    0, sizeof(compat_elf_gregset_t),
						    datap);
			break;

		case COMPAT_PTRACE_GET_THREAD_AREA:
			ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
				       (compat_ulong_t __user *)datap);
			break;

		case COMPAT_PTRACE_SET_SYSCALL:
			task_pt_regs(child)->syscallno = data;
			ret = 0;
			break;

		case COMPAT_PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_aarch32_view,
						  REGSET_COMPAT_VFP,
						  0, VFP_STATE_SIZE,
						  datap);
			break;

		case COMPAT_PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_aarch32_view,
						    REGSET_COMPAT_VFP,
						    0, VFP_STATE_SIZE,
						    datap);
			break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case COMPAT_PTRACE_GETHBPREGS:
			ret = compat_ptrace_gethbpregs(child, addr, datap);
			break;

		case COMPAT_PTRACE_SETHBPREGS:
			ret = compat_ptrace_sethbpregs(child, addr, datap);
			break;
#endif

		default:
			ret = compat_ptrace_request(child, request, addr,
						    data);
			break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
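#if 0	/* Illustrative userspace sketch, kept out of the kernel build. */
/*
 * A tracer stopped at a syscall trap can tell entry from exit by sampling
 * the scratch register written above (x7 for native tasks).  An assumption
 * about tracer usage, not part of this file.
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static int syscall_dir(pid_t pid)	/* 0 = entry, 1 = exit, -1 = error */
{
	struct user_pt_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov))
		return -1;

	return regs.regs[7];
}
#endif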
int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
 * We also take into account DIT (bit 24), which is not yet documented, and
 * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
 * allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}