/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

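/*
 * Illustrative example (editorial, not part of the original source):
 * kprobes-style consumers use the table above to turn a register name
 * into a byte offset into struct pt_regs:
 *
 *	int off = regs_query_register_offset("x0");
 *	unsigned long val = regs_get_register(regs, off);
 *
 * Here off equals offsetof(struct pt_regs, regs[0]), and "lr" resolves
 * to the same offset as "x30". regs_get_register() is assumed to be the
 * helper declared in <asm/ptrace.h>.
 */
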
/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, NULL);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	const char *desc = "Hardware breakpoint trap (ptrace)";

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		int si_errno = 0;
		int i;

		for (i = 0; i < ARM_MAX_BRP; ++i) {
			if (current->thread.debug.hbp_break[i] == bp) {
				si_errno = (i << 1) + 1;
				break;
			}
		}

		for (i = 0; i < ARM_MAX_WRP; ++i) {
			if (current->thread.debug.hbp_watch[i] == bp) {
				si_errno = -((i << 1) + 1);
				break;
			}
		}
		arm64_force_sig_ptrace_errno_trap(si_errno,
						  (void __user *)bkpt->trigger,
						  desc);
	}
#endif
	arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			      (void __user *)(bkpt->trigger),
			      desc);
}

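/*
 * Editorial note (illustrative): the compat si_errno computed above packs
 * the slot index and breakpoint type into one small integer: breakpoint
 * slot i is reported as (i << 1) + 1 (slots 0, 1, 2 -> 1, 3, 5) and
 * watchpoint slot i as the negated value (-1, -3, -5, ...). This mirrors
 * the AArch32 PTRACE_GETHBPREGS register numbering decoded by
 * compat_ptrace_hbp_num_to_idx() later in this file.
 */
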
/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

out:
	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx >= ARM_MAX_BRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_BRP);
		tsk->thread.debug.hbp_break[idx] = bp;
		err = 0;
		break;
	case NT_ARM_HW_WATCH:
		if (idx >= ARM_MAX_WRP)
			goto out;
		idx = array_index_nospec(idx, ARM_MAX_WRP);
		tsk->thread.debug.hbp_watch[idx] = bp;
		err = 0;
		break;
	}

out:
	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

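/*
 * Editorial note (illustrative): the perf events backing ptrace
 * breakpoints are created lazily, on the first access to a slot (see
 * ptrace_hbp_get_initialised_bp() below), and start out disabled
 * (attr.disabled = 1 above). They only go live once the tracer writes a
 * control word with the enable bit set via ptrace_hbp_set_ctrl().
 */
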
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len = len;
	attr->bp_type = type;
	attr->bp_addr += offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

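/*
 * Illustrative example (editorial): the resource word built by
 * ptrace_hbp_get_resource_info() is (debug_arch << 8) | slot_count.
 * Assuming an ARMv8 debug architecture ID of 0x6 and four instruction
 * breakpoint slots, a tracer reading the first u32 of the NT_ARM_HW_BREAK
 * regset would see 0x0604.
 */
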
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}

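/*
 * Editorial note (illustrative): hw_break_get() serialises struct
 * user_hwdebug_state as a u32 resource word, a u32 pad, then one
 * { u64 addr; u32 ctrl; u32 pad; } triple per slot, matching the uapi
 * layout in <asm/ptrace.h>. hw_break_set() below consumes the same
 * layout, skipping the read-only resource word and pad.
 */
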
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

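/*
 * Illustrative example (editorial, not from the original source): a
 * native debugger reaches gpr_get()/gpr_set() through PTRACE_GETREGSET /
 * PTRACE_SETREGSET with NT_PRSTATUS:
 *
 *	struct user_pt_regs regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 *
 * Writes that would produce an invalid pstate are rejected with -EINVAL
 * by valid_user_regs() at the bottom of this file.
 */
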
/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int __fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf, unsigned int start_pos)
{
	struct user_fpsimd_state *uregs;

	sve_sync_to_fpsimd(target);

	uregs = &target->thread.uw.fpsimd_state;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				   start_pos, start_pos + sizeof(*uregs));
}

static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	if (target == current)
		fpsimd_preserve_current_state();

	return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
}

static int __fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned int start_pos)
{
	int ret;
	struct user_fpsimd_state newstate;

	/*
	 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
	 * short copyin can't resurrect stale data.
	 */
	sve_sync_to_fpsimd(target);

	newstate = target->thread.uw.fpsimd_state;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
				 start_pos, start_pos + sizeof(newstate));
	if (ret)
		return ret;

	target->thread.uw.fpsimd_state = newstate;

	return ret;
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
	if (ret)
		return ret;

	sve_sync_from_fpsimd_zeropad(target);
	fpsimd_flush_task_state(target);

	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.uw.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

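/*
 * Illustrative example (editorial): NT_ARM_SYSTEM_CALL is a one-element
 * regset, so a tracer stopped at syscall entry can rewrite the pending
 * syscall number with something like:
 *
 *	int nr = -1;
 *	struct iovec iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
 *	ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &iov);
 *
 * where -1 (NO_SYSCALL) is commonly used to skip the syscall.
 */
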
#ifdef CONFIG_ARM64_SVE

static void sve_init_header_from_task(struct user_sve_header *header,
				      struct task_struct *target)
{
	unsigned int vq;

	memset(header, 0, sizeof(*header));

	header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
		SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
	if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
		header->flags |= SVE_PT_VL_INHERIT;

	header->vl = target->thread.sve_vl;
	vq = sve_vq_from_vl(header->vl);

	header->max_vl = sve_max_vl;
	header->size = SVE_PT_SIZE(vq, header->flags);
	header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
				       SVE_PT_REGS_SVE);
}

static unsigned int sve_size_from_header(struct user_sve_header const *header)
{
	return ALIGN(header->size, SVE_VQ_BYTES);
}

static unsigned int sve_get_size(struct task_struct *target,
				 const struct user_regset *regset)
{
	struct user_sve_header header;

	if (!system_supports_sve())
		return 0;

	sve_init_header_from_task(&header, target);
	return sve_size_from_header(&header);
}

static int sve_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	sve_init_header_from_task(&header, target);
	vq = sve_vq_from_vl(header.vl);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
				  0, sizeof(header));
	if (ret)
		return ret;

	if (target == current)
		fpsimd_preserve_current_state();

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
		return __fpr_get(target, regset, pos, count, kbuf, ubuf,
				 SVE_PT_FPSIMD_OFFSET);

	/* Otherwise: full SVE case */

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.sve_state,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       start, end);
	if (ret)
		return ret;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.uw.fpsimd_state.fpsr,
				  start, end);
	if (ret)
		return ret;

	start = end;
	end = sve_size_from_header(&header);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					start, end);
}

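/*
 * Editorial note (illustrative): vl above is the vector length in bytes
 * and vq the corresponding number of 128-bit quadwords, so
 * vq = vl / SVE_VQ_BYTES = vl / 16. A 256-bit vector length, for
 * example, gives vl = 32 and vq = 2, and header->size then follows from
 * SVE_PT_SIZE(2, header->flags).
 */
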
static int sve_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_sve_header header;
	unsigned int vq;
	unsigned long start, end;

	if (!system_supports_sve())
		return -EINVAL;

	/* Header */
	if (count < sizeof(header))
		return -EINVAL;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
				 0, sizeof(header));
	if (ret)
		goto out;

	/*
	 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
	 * sve_set_vector_length(), which will also validate them for us:
	 */
	ret = sve_set_vector_length(target, header.vl,
		((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
	if (ret)
		goto out;

	/* Actual VL set may be less than the user asked for: */
	vq = sve_vq_from_vl(target->thread.sve_vl);

	/* Registers: FPSIMD-only case */

	BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
	if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
		ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
				SVE_PT_FPSIMD_OFFSET);
		clear_tsk_thread_flag(target, TIF_SVE);
		goto out;
	}

	/* Otherwise: full SVE case */

	/*
	 * If setting a different VL from the requested VL and there is
	 * register data, the data layout will be wrong: don't even
	 * try to set the registers in this case.
	 */
	if (count && vq != sve_vq_from_vl(header.vl)) {
		ret = -EIO;
		goto out;
	}

	sve_alloc(target);

	/*
	 * Ensure target->thread.sve_state is up to date with target's
	 * FPSIMD regs, so that a short copyin leaves trailing registers
	 * unmodified.
	 */
	fpsimd_sync_to_sve(target);
	set_tsk_thread_flag(target, TIF_SVE);

	BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
	start = SVE_PT_SVE_OFFSET;
	end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.sve_state,
				 start, end);
	if (ret)
		goto out;

	start = end;
	end = SVE_PT_SVE_FPSR_OFFSET(vq);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					start, end);
	if (ret)
		goto out;

	/*
	 * Copy fpsr and fpcr, which must follow contiguously in
	 * struct fpsimd_state:
	 */
	start = end;
	end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.uw.fpsimd_state.fpsr,
				 start, end);

out:
	fpsimd_flush_task_state(target);
	return ret;
}

#endif /* CONFIG_ARM64_SVE */

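/*
 * Editorial note (assumption based on the uapi headers): the << 16 in
 * sve_set() converts the SVE_PT_VL_* ptrace flags back into the
 * PR_SVE_VL_* prctl flag positions expected by sve_set_vector_length();
 * the SVE_PT_VL_* constants are defined as their PR_SVE_* counterparts
 * shifted right by 16.
 */
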
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
#ifdef CONFIG_ARM64_SVE
	[REGSET_SVE] = { /* Scalable Vector Extension */
		.core_note_type = NT_ARM_SVE,
		.n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
				  SVE_VQ_BYTES),
		.size = SVE_VQ_BYTES,
		.align = SVE_VQ_BYTES,
		.get = sve_get,
		.set = sve_set,
		.get_size = sve_get_size,
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};

#ifdef CONFIG_COMPAT
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			reg = pstate_to_compat_psr(reg);
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

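/*
 * Editorial note (illustrative): in the compat GPR view, indices 0-14
 * map straight onto r0-r14 (regs[0..14]), while indices 15, 16 and 17
 * carry the pc, a compat-converted pstate (cpsr) and orig_x0 (orig_r0)
 * respectively, matching the layout of compat_elf_gregset_t.
 */
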
static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			reg = compat_psr_to_pstate(reg);
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.uw.fpsimd_state;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.uw.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.uw.tp_value = tls;
	return ret;
}

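/*
 * Editorial note (illustrative): AArch32 exposes a single FPSCR where
 * AArch64 splits the state and control bits into fpsr and fpcr, so
 * compat_vfp_get() reassembles it as
 *
 *	fpscr = (fpsr & VFP_FPSCR_STAT_MASK) | (fpcr & VFP_FPSCR_CTRL_MASK);
 *
 * and compat_vfp_set() performs the inverse split.
 */
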
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

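/*
 * Illustrative example (editorial): with the numbering described above,
 * num = 1 is the address and num = 2 the control register of breakpoint
 * slot 0 (idx = (abs(num) - 1) >> 1 = 0); num = 3 and 4 address the same
 * pair for slot 1; num = -1 and -2 select watchpoint slot 0. num = 0 is
 * the read-only resource information word.
 */
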
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;

	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);

	return ret;
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip (r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}

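/*
 * Editorial note (illustrative): while a tracee is stopped under
 * PTRACE_SYSCALL, a tracer can read the scratch register to tell entry
 * from exit: on AArch64, x7 reads as PTRACE_SYSCALL_ENTER (0) at syscall
 * entry and PTRACE_SYSCALL_EXIT (1) at exit, and the saved value is
 * restored before the tracee resumes.
 */
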
int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);

	rseq_syscall(regs);
}

/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
 * We also take into account DIT (bit 24), which is not yet documented, and
 * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
 * allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= PSR_AA32_E_BIT;
		else
			regs->pstate &= ~PSR_AA32_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_AA32_A_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_I_BIT) == 0 &&
	    (regs->pstate & PSR_AA32_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
			PSR_AA32_C_BIT | PSR_AA32_V_BIT |
			PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
			PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
			PSR_AA32_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}