/*
 * Based on arch/arm/kernel/ptrace.c
 *
 * By Ross Biro 1/23/92
 * edited by Linus Torvalds
 * ARM modifications Copyright (C) 2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/seccomp.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>

#include <asm/compat.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
#include <asm/system_misc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, it returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
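
/*
 * Illustrative sketch (not part of the original file): callers such as the
 * kprobes fetch-arg code pair the offset returned above with
 * regs_get_register() to read a register by name, along the lines of:
 *
 *	int off = regs_query_register_offset("x2");
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 */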

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr);
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}

/*
 * TODO: does not yet catch signals sent when the child dies,
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific warts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	siginfo_t info = {
		.si_signo	= SIGTRAP,
		.si_errno	= 0,
		.si_code	= TRAP_HWBKPT,
		.si_addr	= (void __user *)(bkpt->trigger),
	};

#ifdef CONFIG_COMPAT
	int i;

	if (!is_compat_task())
		goto send_sig;

	for (i = 0; i < ARM_MAX_BRP; ++i) {
		if (current->thread.debug.hbp_break[i] == bp) {
			info.si_errno = (i << 1) + 1;
			break;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; ++i) {
		if (current->thread.debug.hbp_watch[i] == bp) {
			info.si_errno = -((i << 1) + 1);
			break;
		}
	}

send_sig:
#endif
	force_sig_info(SIGTRAP, &info, current);
}
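
/*
 * Illustrative note (added for clarity, derived from the code above): for a
 * compat task the si_errno value encodes which slot fired, matching the
 * virtual register numbering used by COMPAT_PTRACE_GETHBPREGS further down.
 * For example, breakpoint slot 2 reports si_errno 5 ((2 << 1) + 1) and
 * watchpoint slot 0 reports si_errno -1.
 */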

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_BRP; i++) {
		if (t->debug.hbp_break[i]) {
			unregister_hw_breakpoint(t->debug.hbp_break[i]);
			t->debug.hbp_break[i] = NULL;
		}
	}

	for (i = 0; i < ARM_MAX_WRP; i++) {
		if (t->debug.hbp_watch[i]) {
			unregister_hw_breakpoint(t->debug.hbp_watch[i]);
			t->debug.hbp_watch[i] = NULL;
		}
	}
}

void ptrace_hw_copy_thread(struct task_struct *tsk)
{
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp = ERR_PTR(-EINVAL);

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP)
			bp = tsk->thread.debug.hbp_break[idx];
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP)
			bp = tsk->thread.debug.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	int err = -EINVAL;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if (idx < ARM_MAX_BRP) {
			tsk->thread.debug.hbp_break[idx] = bp;
			err = 0;
		}
		break;
	case NT_ARM_HW_WATCH:
		if (idx < ARM_MAX_WRP) {
			tsk->thread.debug.hbp_watch[idx] = bp;
			err = 0;
		}
		break;
	}

	return err;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	int err, type;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_ARM_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset, disabled = !ctrl.enabled;

	attr->disabled = disabled;
	if (disabled)
		return 0;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_ARM_HW_WATCH:
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
{
	u8 num;
	u32 reg = 0;

	switch (note_type) {
	case NT_ARM_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_ARM_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	reg |= debug_monitors_arch();
	reg <<= 8;
	reg |= num;

	*info = reg;
	return 0;
}
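
/*
 * Layout note (added for clarity, derived from the code above): the
 * resource-info word reported to userspace packs the debug architecture
 * version in bits [15:8] and the number of breakpoint or watchpoint slots
 * in bits [7:0], i.e. info == (debug_monitors_arch() << 8) | num_slots.
 */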

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;
	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx,
			       u64 addr)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp)) {
		err = PTR_ERR(bp);
		return err;
	}

	attr = bp->attr;
	attr.bp_addr = addr;
	err = modify_user_hw_breakpoint(bp, &attr);
	return err;
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
				  sizeof(info));
	if (ret)
		return ret;

	/* Pad */
	offset = offsetof(struct user_hwdebug_state, pad);
	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
				       offset + PTRACE_HBP_PAD_SZ);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       offset,
					       offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
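
/*
 * Usage sketch (added for illustration, not part of the original file): a
 * debugger reads this regset with PTRACE_GETREGSET and NT_ARM_HW_BREAK (or
 * NT_ARM_HW_WATCH), and writes it back via hw_break_set() below with
 * PTRACE_SETREGSET, e.g.:
 *
 *	struct user_hwdebug_state dbg;
 *	struct iovec iov = { .iov_base = &dbg, .iov_len = sizeof(dbg) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_HW_BREAK, &iov);
 *
 * dbg.dbg_info then carries the resource word and dbg.dbg_regs[] the
 * (addr, ctrl) pairs filled in above.
 */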

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info and pad */
	offset = offsetof(struct user_hwdebug_state, dbg_regs);
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						offset,
						offset + PTRACE_HBP_PAD_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_PAD_SZ;
		idx++;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs, target))
		return -EINVAL;

	task_pt_regs(target)->user_regs = newregs;
	return 0;
}

/*
 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_fpsimd_state newstate =
		target->thread.fpsimd_state.user_fpsimd;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	target->thread.fpsimd_state.user_fpsimd = newstate;
	fpsimd_flush_task_state(target);
	return ret;
}

static int tls_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned long *tls = &target->thread.tp_value;

	if (target == current)
		tls_preserve_current_state();

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}

static int tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}
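
/*
 * Usage sketch (illustrative, not from the original source): the TLS regset
 * exposes the thread pointer (TPIDR_EL0, held in thread.tp_value) as a
 * single 64-bit value, so a tracer can read it with:
 *
 *	uint64_t tp;
 *	struct iovec iov = { .iov_base = &tp, .iov_len = sizeof(tp) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_ARM_TLS, &iov);
 */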

static int system_call_get(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   void *kbuf, void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &syscallno, 0, -1);
}

static int system_call_set(struct task_struct *target,
			   const struct user_regset *regset,
			   unsigned int pos, unsigned int count,
			   const void *kbuf, const void __user *ubuf)
{
	int syscallno = task_pt_regs(target)->syscallno;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
	if (ret)
		return ret;

	task_pt_regs(target)->syscallno = syscallno;
	return ret;
}

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};

static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32 bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
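
/*
 * Usage sketch (illustrative, not part of the original file): native
 * debuggers reach the regsets above through PTRACE_GETREGSET /
 * PTRACE_SETREGSET, selecting a set by its core note type, e.g.:
 *
 *	struct user_pt_regs gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */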

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};

static int compat_gpr_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		switch (idx) {
		case 15:
			reg = task_pt_regs(target)->pc;
			break;
		case 16:
			reg = task_pt_regs(target)->pstate;
			break;
		case 17:
			reg = task_pt_regs(target)->orig_x0;
			break;
		default:
			reg = task_pt_regs(target)->regs[idx];
		}

		if (kbuf) {
			memcpy(kbuf, &reg, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_to_user(ubuf, &reg, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}
	}

	return ret;
}

static int compat_gpr_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = 0;
	unsigned int i, start, num_regs;

	/* Calculate the number of AArch32 registers contained in count */
	num_regs = count / regset->size;

	/* Convert pos into a register number */
	start = pos / regset->size;

	if (start + num_regs > regset->n)
		return -EIO;

	newregs = *task_pt_regs(target);

	for (i = 0; i < num_regs; ++i) {
		unsigned int idx = start + i;
		compat_ulong_t reg;

		if (kbuf) {
			memcpy(&reg, kbuf, sizeof(reg));
			kbuf += sizeof(reg);
		} else {
			ret = copy_from_user(&reg, ubuf, sizeof(reg));
			if (ret) {
				ret = -EFAULT;
				break;
			}

			ubuf += sizeof(reg);
		}

		switch (idx) {
		case 15:
			newregs.pc = reg;
			break;
		case 16:
			newregs.pstate = reg;
			break;
		case 17:
			newregs.orig_x0 = reg;
			break;
		default:
			newregs.regs[idx] = reg;
		}

	}

	if (valid_user_regs(&newregs.user_regs, target))
		*task_pt_regs(target) = newregs;
	else
		ret = -EINVAL;

	return ret;
}
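
/*
 * Mapping note (added for clarity, derived from the accessors above): in the
 * compat GPR view, indices 0-14 are the AArch32 r0-r14 held in regs[0..14],
 * index 15 is the PC, index 16 the CPSR (stored in pstate) and index 17 the
 * original r0 (orig_x0) used for syscall restarting.
 */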

static int compat_vfp_get(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  void *kbuf, void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	if (target == current)
		fpsimd_preserve_current_state();

	/*
	 * The VFP registers are packed into the fpsimd_state, so they all sit
	 * nicely together for us. We just need to create the fpscr separately.
	 */
	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
				  0, vregs_end_pos);

	if (count && !ret) {
		fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
			(uregs->fpcr & VFP_FPSCR_CTRL_MASK);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
					  vregs_end_pos, VFP_STATE_SIZE);
	}

	return ret;
}

static int compat_vfp_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct user_fpsimd_state *uregs;
	compat_ulong_t fpscr;
	int ret, vregs_end_pos;

	uregs = &target->thread.fpsimd_state.user_fpsimd;

	vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 vregs_end_pos);

	if (count && !ret) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
					 vregs_end_pos, VFP_STATE_SIZE);
		if (!ret) {
			uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
			uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
		}
	}

	fpsimd_flush_task_state(target);
	return ret;
}

static int compat_tls_get(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, void *kbuf, void __user *ubuf)
{
	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
}

static int compat_tls_set(struct task_struct *target,
			  const struct user_regset *regset, unsigned int pos,
			  unsigned int count, const void *kbuf,
			  const void __user *ubuf)
{
	int ret;
	compat_ulong_t tls = target->thread.tp_value;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
	if (ret)
		return ret;

	target->thread.tp_value = tls;
	return ret;
}

static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};

static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};

static const struct user_regset aarch32_ptrace_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_tls_get,
		.set = compat_tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_ARM_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(int),
		.align = sizeof(int),
		.get = system_call_get,
		.set = system_call_set,
	},
};

static const struct user_regset_view user_aarch32_ptrace_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
};

static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	if (off & 3)
		return -EIO;

	if (off == COMPAT_PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == COMPAT_PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == COMPAT_PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}

static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
				    compat_ulong_t val)
{
	int ret;
	mm_segment_t old_fs = get_fs();

	if (off & 3 || off >= COMPAT_USER_SZ)
		return -EIO;

	if (off >= sizeof(compat_elf_gregset_t))
		return 0;

	set_fs(KERNEL_DS);
	ret = copy_regset_from_user(tsk, &user_aarch32_view,
				    REGSET_COMPAT_GPR, off,
				    sizeof(compat_ulong_t),
				    &val);
	set_fs(old_fs);

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
{
	return (abs(num) - 1) >> 1;
}
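
/*
 * Numbering example (added for clarity, derived from the scheme above and
 * the accessors below): virtual register 1 is breakpoint 0's address and 2
 * its control word, 3/4 belong to breakpoint 1, and so on; -1/-2 are
 * likewise the address/control pair for watchpoint 0. Odd magnitudes select
 * the address register, even ones the control register.
 */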

static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps = hw_breakpoint_slots(TYPE_INST);
	num_wrps = hw_breakpoint_slots(TYPE_DATA);

	debug_arch = debug_monitors_arch();
	wp_len = 8;
	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	*kdata = reg;
	return 0;
}

static int compat_ptrace_hbp_get(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr = 0;
	u32 ctrl = 0;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
		*kdata = (u32)addr;
	} else {
		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
		*kdata = ctrl;
	}

	return err;
}

static int compat_ptrace_hbp_set(unsigned int note_type,
				 struct task_struct *tsk,
				 compat_long_t num,
				 u32 *kdata)
{
	u64 addr;
	u32 ctrl;

	int err, idx = compat_ptrace_hbp_num_to_idx(num);

	if (num & 1) {
		addr = *kdata;
		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
	} else {
		ctrl = *kdata;
		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
	}

	return err;
}

static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}

static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
#endif	/* CONFIG_HAVE_HW_BREAKPOINT */

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case COMPAT_PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case COMPAT_PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case COMPAT_PTRACE_GET_THREAD_AREA:
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case COMPAT_PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case COMPAT_PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case COMPAT_PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif /* CONFIG_COMPAT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	/*
	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
	 * user_aarch32_view compatible with arm32. Native ptrace requests on
	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
	 * access to the TLS register.
	 */
	if (is_compat_task())
		return &user_aarch32_view;
	else if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_ptrace_view;
#endif
	return &user_aarch64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}

enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};

static void tracehook_report_syscall(struct pt_regs *regs,
				     enum ptrace_syscall_dir dir)
{
	int regno;
	unsigned long saved_reg;

	/*
	 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
	 * used to denote syscall entry/exit:
	 */
	regno = (is_compat_task() ? 12 : 7);
	saved_reg = regs->regs[regno];
	regs->regs[regno] = dir;

	if (dir == PTRACE_SYSCALL_EXIT)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		forget_syscall(regs);

	regs->regs[regno] = saved_reg;
}
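
/*
 * Tracer-side note (added for illustration): while the tracee is stopped in
 * a syscall-trace stop, the scratch register above reads back as
 * PTRACE_SYSCALL_ENTER (0) or PTRACE_SYSCALL_EXIT (1), so a tracer can tell
 * the two stops apart by inspecting x7 (or r12 for a compat task) in the
 * NT_PRSTATUS regset. The saved value is restored before the tracee resumes.
 */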

asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);

	/* Do the secure computing after ptrace; failures should be fast. */
	if (secure_computing(NULL) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, regs->syscallno);

	audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
			    regs->regs[2], regs->regs[3]);

	return regs->syscallno;
}

asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}

/*
 * Bits which are always architecturally RES0 per ARM DDI 0487A.h.
 * Userspace cannot use these until they have an architectural meaning.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
	 GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))

static int valid_compat_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;

	if (!system_supports_mixed_endian_el0()) {
		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
			regs->pstate |= COMPAT_PSR_E_BIT;
		else
			regs->pstate &= ~COMPAT_PSR_E_BIT;
	}

	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
		return 1;
	}

	/*
	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
	 * arch/arm.
	 */
	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
			COMPAT_PSR_T_BIT;
	regs->pstate |= PSR_MODE32_BIT;

	return 0;
}

static int valid_native_regs(struct user_pt_regs *regs)
{
	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;

	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
	    (regs->pstate & PSR_D_BIT) == 0 &&
	    (regs->pstate & PSR_A_BIT) == 0 &&
	    (regs->pstate & PSR_I_BIT) == 0 &&
	    (regs->pstate & PSR_F_BIT) == 0) {
		return 1;
	}

	/* Force PSR to a valid 64-bit EL0t */
	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;

	return 0;
}

/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);
	else
		return valid_native_regs(regs);
}