// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
 * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/regset.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * These are our native regset flavors.
 */
enum parisc_regset {
	REGSET_GENERAL,
	REGSET_FP
};

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* make sure the trap bits are not set */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

/*
 * The following functions are called by ptrace_resume() when
 * enabling or disabling single/block tracing.
 */
void user_disable_single_step(struct task_struct *task)
{
	ptrace_disable(task);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	set_tsk_thread_flag(task, TIF_SINGLESTEP);

	if (pa_psw(task)->n) {
		/* Nullified, just crank over the queue. */
		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
		pa_psw(task)->n = 0;
		pa_psw(task)->x = 0;
		pa_psw(task)->y = 0;
		pa_psw(task)->z = 0;
		pa_psw(task)->b = 0;
		ptrace_disable(task);
		/* Don't wake up the task, but let the
		   parent know something happened. */
		force_sig_fault_to_task(SIGTRAP, TRAP_TRACE,
				(void __user *) (task_regs(task)->iaoq[0] & ~3),
				task);
		/* notify_parent(task, SIGCHLD); */
		return;
	}

	/* Enable recovery counter traps.  The recovery counter
	 * itself will be set to zero on a task switch.  If the
	 * task is suspended on a syscall then the syscall return
	 * path will overwrite the recovery counter with a suitable
	 * value such that it traps once back in user space.  We
	 * disable interrupts in the task's PSW here also, to avoid
	 * interrupts while the recovery counter is decrementing.
	 */
	pa_psw(task)->r = 1;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

void user_enable_block_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	set_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* Enable taken branch trap. */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 1;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

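/*
 * To summarize the stepping support above: single stepping arms the
 * recovery counter trap (PSW R bit), unless the queued instruction is
 * already nullified, in which case the instruction queue is simply
 * advanced and a SIGTRAP is delivered right away; block stepping arms
 * the taken branch trap (PSW T bit).  ptrace_disable() clears both TIF
 * flags and these PSW trap enable bits again.
 */
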
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	unsigned long tmp;
	long ret = -EIO;

	switch (request) {

	/* Read the word at location addr in the USER area.  For ptraced
	   processes, the kernel saves all regs on a syscall. */
	case PTRACE_PEEKUSR:
		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, datap);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			data &= USER_PSW_BITS;
			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
			task_regs(child)->gr[0] |= data;
			ret = 0;
			break;
		}

		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
			data |= 3; /* ensure userspace privilege */
		}
		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
				addr == PT_SAR) {
			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_fp_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_fp_struct),
					     datap);

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}


#ifdef CONFIG_COMPAT

/* This function is needed to translate 32 bit pt_regs offsets into
 * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
 * will request offset 12 if it wants gr3, but the lower 32 bits of
 * the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
 * This code relies on a 32 bit pt_regs being comprised of 32 bit values
 * except for the fp registers which (a) are 64 bits, and (b) follow
 * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
 * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
 * being 64 bit in both cases.
 */

static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
	compat_ulong_t pos;

	if (offset < 32*4)	/* gr[0..31] */
		pos = offset * 2 + 4;
	else if (offset < 32*4+32*8)	/* fr[0] ... fr[31] */
		pos = (offset - 32*4) + PT_FR0;
	else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
		pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
	else
		pos = sizeof(struct pt_regs);

	return pos;
}

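/*
 * Worked example of the mapping above: a 32-bit debugger asking for gr3
 * passes offset 3*4 = 12, which falls in the gr[] range and maps to
 * 12*2 + 4 = 28, i.e. the low word of the 64-bit gr[3] slot.  A request
 * for fr5 at offset 32*4 + 5*8 maps straight to PT_FR0 + 5*8, since
 * fr[] is 64 bit in both layouts.
 */
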
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	compat_uint_t tmp;
	long ret = -EIO;

	switch (request) {

	case PTRACE_PEEKUSR:
		if (addr & (sizeof(compat_uint_t)-1))
			break;
		addr = translate_usr_offset(addr);
		if (addr >= sizeof(struct pt_regs))
			break;

		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Since PT_PSW==0, it is valid for 32 bit processes
			 * under 64 bit kernels as well.
			 */
			ret = arch_ptrace(child, request, addr, data);
		} else {
			if (addr & (sizeof(compat_uint_t)-1))
				break;
			addr = translate_usr_offset(addr);
			if (addr >= sizeof(struct pt_regs))
				break;
			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
				data |= 3; /* ensure userspace privilege */
			}
			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
				/* Special case, fp regs are 64 bits anyway */
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
					addr == PT_SAR+4) {
				/* Zero the top 32 bits */
				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
		}
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif

long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		int rc = tracehook_report_syscall_entry(regs);

		/*
		 * As tracesys_next does not set %r28 to -ENOSYS
		 * when %r20 is set to -1, initialize it here.
		 */
		regs->gr[28] = -ENOSYS;

		if (rc) {
			/*
			 * A nonzero return code from
			 * tracehook_report_syscall_entry() tells us
			 * to prevent the syscall execution.  Skip
			 * the syscall call and the syscall restart handling.
			 *
			 * Note that the tracer may also just change
			 * regs->gr[20] to an invalid syscall number,
			 * that is handled by tracesys_next.
			 */
			regs->gr[20] = -1UL;
			return -1;
		}
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL) == -1)
		return -1;

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gr[20]);
#endif

#ifdef CONFIG_64BIT
	if (!is_compat_task())
		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
				    regs->gr[24], regs->gr[23]);
	else
#endif
		audit_syscall_entry(regs->gr[20] & 0xffffffff,
			regs->gr[26] & 0xffffffff,
			regs->gr[25] & 0xffffffff,
			regs->gr[24] & 0xffffffff,
			regs->gr[23] & 0xffffffff);

	/*
	 * Sign extend the syscall number to 64bit since it may have been
	 * modified by a compat ptrace call
	 */
	return (int) ((u32) regs->gr[20]);
}

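/*
 * Illustration of the return convention above: the skip path leaves -1
 * in gr[20] and returns -1, and the final (int)((u32) ...) cast means a
 * compat tracer that could only store 0xffffffff into gr[20] is likewise
 * reported back as -1, i.e. as an invalid syscall number.
 */
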
void do_syscall_trace_exit(struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
		test_thread_flag(TIF_BLOCKSTEP);

	audit_syscall_exit(regs);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gr[20]);
#endif

	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, stepping);
}


/*
 * regset functions.
 */

static int fpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	__u64 *k = kbuf;
	__u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			*k++ = regs->fr[pos++];
	else
		for (; count > 0 && pos < ELF_NFPREG; --count)
			if (__put_user(regs->fr[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NFPREG * sizeof(reg), -1);
}

static int fpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const __u64 *k = kbuf;
	const __u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			regs->fr[pos++] = *k++;
	else
		for (; count > 0 && pos < ELF_NFPREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs->fr[pos++] = reg;
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NFPREG * sizeof(reg), -1);
}

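/*
 * Note on the regset helpers: fpr_get/fpr_set above and gpr_get/gpr_set
 * below receive pos and count in bytes, convert them to whole-register
 * units for the copy loops, and scale them back to bytes so that
 * user_regset_copyout_zero()/user_regset_copyin_ignore() can pad or
 * discard whatever lies beyond the last register.
 */
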
#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))

static unsigned long get_reg(struct pt_regs *regs, int num)
{
	switch (num) {
	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iaoq[0]):		return regs->iaoq[0];
	case RI(iaoq[1]):		return regs->iaoq[1];
	case RI(sar):			return regs->sar;
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case RI(cr0):			return mfctl(0);
	case RI(cr24):			return mfctl(24);
	case RI(cr25):			return mfctl(25);
	case RI(cr26):			return mfctl(26);
	case RI(cr28):			return mfctl(28);
	case RI(cr29):			return mfctl(29);
	case RI(cr30):			return mfctl(30);
	case RI(cr31):			return mfctl(31);
	case RI(cr8):			return mfctl(8);
	case RI(cr9):			return mfctl(9);
	case RI(cr12):			return mfctl(12);
	case RI(cr13):			return mfctl(13);
	case RI(cr10):			return mfctl(10);
	case RI(cr15):			return mfctl(15);
	default:			return 0;
	}
}

static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
	switch (num) {
	case RI(gr[0]): /*
			 * PSW is in gr[0].
			 * Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			val &= USER_PSW_BITS;
			regs->gr[0] &= ~USER_PSW_BITS;
			regs->gr[0] |= val;
			return;
	case RI(gr[1]) ... RI(gr[31]):
			regs->gr[num - RI(gr[0])] = val;
			return;
	case RI(iaoq[0]):
	case RI(iaoq[1]):
			/* set 2 lowest bits to ensure userspace privilege: */
			regs->iaoq[num - RI(iaoq[0])] = val | 3;
			return;
	case RI(sar):	regs->sar = val;
			return;
	default:	return;
#if 0
	/* do not allow to change any of the following registers (yet) */
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
	case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
	}
}

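/*
 * Only a small subset of the state is writable via set_reg(): gr1-gr31,
 * the USER_PSW_BITS of gr[0], the IAOQ pair (with the privilege bits
 * forced to 3) and SAR; writes to anything else are silently ignored.
 * get_reg() additionally exposes the space and control registers,
 * either from pt_regs or live via mfctl().
 */
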
static int gpr_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	unsigned long *k = kbuf;
	unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user(get_reg(regs, pos++), u++))
				return -EFAULT;
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const unsigned long *k = kbuf;
	const unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

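/*
 * Native regsets: these back the PTRACE_GETREGS/SETREGS requests handled
 * in arch_ptrace() above and the NT_PRSTATUS/NT_PRFPREG notes written
 * into ELF core dumps.
 */
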
static const struct user_regset native_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_native_view = {
	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_64BIT
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the 32bit native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_compat_view = {
	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
	if (is_compat_task())
		return &user_parisc_compat_view;
#endif
	return &user_parisc_native_view;
}

/* HAVE_REGS_AND_STACK_ACCESS_API feature */

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_INDEX(gr,0),
	REG_OFFSET_INDEX(gr,1),
	REG_OFFSET_INDEX(gr,2),
	REG_OFFSET_INDEX(gr,3),
	REG_OFFSET_INDEX(gr,4),
	REG_OFFSET_INDEX(gr,5),
	REG_OFFSET_INDEX(gr,6),
	REG_OFFSET_INDEX(gr,7),
	REG_OFFSET_INDEX(gr,8),
	REG_OFFSET_INDEX(gr,9),
	REG_OFFSET_INDEX(gr,10),
	REG_OFFSET_INDEX(gr,11),
	REG_OFFSET_INDEX(gr,12),
	REG_OFFSET_INDEX(gr,13),
	REG_OFFSET_INDEX(gr,14),
	REG_OFFSET_INDEX(gr,15),
	REG_OFFSET_INDEX(gr,16),
	REG_OFFSET_INDEX(gr,17),
	REG_OFFSET_INDEX(gr,18),
	REG_OFFSET_INDEX(gr,19),
	REG_OFFSET_INDEX(gr,20),
	REG_OFFSET_INDEX(gr,21),
	REG_OFFSET_INDEX(gr,22),
	REG_OFFSET_INDEX(gr,23),
	REG_OFFSET_INDEX(gr,24),
	REG_OFFSET_INDEX(gr,25),
	REG_OFFSET_INDEX(gr,26),
	REG_OFFSET_INDEX(gr,27),
	REG_OFFSET_INDEX(gr,28),
	REG_OFFSET_INDEX(gr,29),
	REG_OFFSET_INDEX(gr,30),
	REG_OFFSET_INDEX(gr,31),
	REG_OFFSET_INDEX(sr,0),
	REG_OFFSET_INDEX(sr,1),
	REG_OFFSET_INDEX(sr,2),
	REG_OFFSET_INDEX(sr,3),
	REG_OFFSET_INDEX(sr,4),
	REG_OFFSET_INDEX(sr,5),
	REG_OFFSET_INDEX(sr,6),
	REG_OFFSET_INDEX(sr,7),
	REG_OFFSET_INDEX(iasq,0),
	REG_OFFSET_INDEX(iasq,1),
	REG_OFFSET_INDEX(iaoq,0),
	REG_OFFSET_INDEX(iaoq,1),
	REG_OFFSET_NAME(cr27),
	REG_OFFSET_NAME(ksp),
	REG_OFFSET_NAME(kpc),
	REG_OFFSET_NAME(sar),
	REG_OFFSET_NAME(iir),
	REG_OFFSET_NAME(isr),
	REG_OFFSET_NAME(ior),
	REG_OFFSET_NAME(ipsw),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr -= n;

	if (!regs_within_kernel_stack(regs, (unsigned long)addr))
		return 0;

	return *addr;
}
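
/*
 * Illustrative usage of the API above (assumed caller context, e.g.
 * generic kprobe event argument fetching):
 *
 *	int off = regs_query_register_offset("gr26");
 *	unsigned long arg0 = (off >= 0) ? regs_get_register(regs, off) : 0;
 *	unsigned long slot3 = regs_get_kernel_stack_nth(regs, 3);
 *
 * where regs_get_register() is assumed to be the per-architecture
 * accessor declared next to kernel_stack_pointer() in <asm/ptrace.h>.
 */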