// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
 * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/regset.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>

#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * These are our native regset flavors.
 */
enum parisc_regset {
	REGSET_GENERAL,
	REGSET_FP
};

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single-step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* make sure the trap bits are not set */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

/*
 * The following functions are called by ptrace_resume() when
 * enabling or disabling single/block tracing.
 */
void user_disable_single_step(struct task_struct *task)
{
	ptrace_disable(task);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	set_tsk_thread_flag(task, TIF_SINGLESTEP);

	if (pa_psw(task)->n) {
		/* Nullified, just crank over the queue. */
		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
		pa_psw(task)->n = 0;
		pa_psw(task)->x = 0;
		pa_psw(task)->y = 0;
		pa_psw(task)->z = 0;
		pa_psw(task)->b = 0;
		ptrace_disable(task);
		/* Don't wake up the task, but let the
		   parent know something happened. */
		force_sig_fault_to_task(SIGTRAP, TRAP_TRACE,
				(void __user *) (task_regs(task)->iaoq[0] & ~3),
				task);
		/* notify_parent(task, SIGCHLD); */
		return;
	}

	/* Enable recovery counter traps.  The recovery counter
	 * itself will be set to zero on a task switch.  If the
	 * task is suspended on a syscall then the syscall return
	 * path will overwrite the recovery counter with a suitable
	 * value such that it traps once back in user space.  We
	 * disable interrupts in the task's PSW here also, to avoid
	 * interrupts while the recovery counter is decrementing.
	 */
	pa_psw(task)->r = 1;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

void user_enable_block_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	set_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* Enable taken branch trap. */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 1;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}
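
/*
 * Illustrative sketch of how a debugger typically reaches the step-control
 * paths above: the generic resume requests go through ptrace_resume() and
 * end up in the helpers in this file, roughly:
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	// -> user_enable_single_step()
 *	waitpid(pid, &status, 0);		// tracee stops with SIGTRAP
 *
 * Block stepping, where the PTRACE_SINGLEBLOCK request is available, is
 * driven the same way and lands in user_enable_block_step().
 */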

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	unsigned long tmp;
	long ret = -EIO;

	switch (request) {

	/* Read the word at location addr in the USER area.  For ptraced
	   processes, the kernel saves all regs on a syscall. */
	case PTRACE_PEEKUSR:
		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, datap);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			data &= USER_PSW_BITS;
			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
			task_regs(child)->gr[0] |= data;
			ret = 0;
			break;
		}

		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
			data |= 3; /* ensure userspace privilege */
		}
		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
				addr == PT_SAR) {
			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_fp_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_fp_struct),
					     datap);

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
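
/*
 * Illustrative sketch of driving the PEEKUSR/POKEUSR cases above from user
 * space.  The request is spelled PTRACE_PEEKUSER in glibc, and PT_GR28 here
 * stands for the byte offset of gr[28] within struct pt_regs (a real debugger
 * supplies that offset itself), so this is a sketch rather than a recipe:
 *
 *	errno = 0;
 *	long val = ptrace(PTRACE_PEEKUSER, pid, PT_GR28, 0);
 *	if (val == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 */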

#ifdef CONFIG_COMPAT

/* This function is needed to translate 32 bit pt_regs offsets into
 * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
 * will request offset 12 if it wants gr3, but the lower 32 bits of
 * the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
 * This code relies on a 32 bit pt_regs being comprised of 32 bit values
 * except for the fp registers which (a) are 64 bits, and (b) follow
 * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
 * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
 * being 64 bit in both cases.
 */

static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
	compat_ulong_t pos;

	if (offset < 32*4)	/* gr[0..31] */
		pos = offset * 2 + 4;
	else if (offset < 32*4+32*8)	/* fr[0] ... fr[31] */
		pos = (offset - 32*4) + PT_FR0;
	else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
		pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
	else
		pos = sizeof(struct pt_regs);

	return pos;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	compat_uint_t tmp;
	long ret = -EIO;

	switch (request) {

	case PTRACE_PEEKUSR:
		if (addr & (sizeof(compat_uint_t)-1))
			break;
		addr = translate_usr_offset(addr);
		if (addr >= sizeof(struct pt_regs))
			break;

		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (compat_uint_t __user *) (unsigned long) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Since PT_PSW==0, it is valid for 32 bit processes
			 * under 64 bit kernels as well.
			 */
			ret = arch_ptrace(child, request, addr, data);
		} else {
			if (addr & (sizeof(compat_uint_t)-1))
				break;
			addr = translate_usr_offset(addr);
			if (addr >= sizeof(struct pt_regs))
				break;
			if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
				data |= 3; /* ensure userspace privilege */
			}
			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
				/* Special case, fp regs are 64 bits anyway */
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
					addr == PT_SAR+4) {
				/* Zero the top 32 bits */
				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
		}
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif

long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		int rc = tracehook_report_syscall_entry(regs);

		/*
		 * As tracesys_next does not set %r28 to -ENOSYS
		 * when %r20 is set to -1, initialize it here.
		 */
		regs->gr[28] = -ENOSYS;

		if (rc) {
			/*
			 * A nonzero return code from
			 * tracehook_report_syscall_entry() tells us
			 * to prevent the syscall execution.  Skip
			 * the syscall call and the syscall restart handling.
			 *
			 * Note that the tracer may also just change
			 * regs->gr[20] to an invalid syscall number;
			 * that is handled by tracesys_next.
			 */
			regs->gr[20] = -1UL;
			return -1;
		}
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing() == -1)
		return -1;

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gr[20]);
#endif

#ifdef CONFIG_64BIT
	if (!is_compat_task())
		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
				    regs->gr[24], regs->gr[23]);
	else
#endif
		audit_syscall_entry(regs->gr[20] & 0xffffffff,
				    regs->gr[26] & 0xffffffff,
				    regs->gr[25] & 0xffffffff,
				    regs->gr[24] & 0xffffffff,
				    regs->gr[23] & 0xffffffff);

	/*
	 * Sign extend the syscall number to 64 bit since it may have been
	 * modified by a compat ptrace call.
	 */
	return (int) ((u32) regs->gr[20]);
}

void do_syscall_trace_exit(struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
		test_thread_flag(TIF_BLOCKSTEP);

	audit_syscall_exit(regs);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gr[20]);
#endif

	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, stepping);
}

/*
 * regset functions.
 */

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	__u64 *k = kbuf;
	__u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			*k++ = regs->fr[pos++];
	else
		for (; count > 0 && pos < ELF_NFPREG; --count)
			if (__put_user(regs->fr[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NFPREG * sizeof(reg), -1);
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const __u64 *k = kbuf;
	const __u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			regs->fr[pos++] = *k++;
	else
		for (; count > 0 && pos < ELF_NFPREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs->fr[pos++] = reg;
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NFPREG * sizeof(reg), -1);
}

#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))

static unsigned long get_reg(struct pt_regs *regs, int num)
{
	switch (num) {
	case RI(gr[0]) ... RI(gr[31]): return regs->gr[num - RI(gr[0])];
	case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]): return regs->iasq[0];
	case RI(iasq[1]): return regs->iasq[1];
	case RI(iaoq[0]): return regs->iaoq[0];
	case RI(iaoq[1]): return regs->iaoq[1];
	case RI(sar): return regs->sar;
	case RI(iir): return regs->iir;
	case RI(isr): return regs->isr;
	case RI(ior): return regs->ior;
	case RI(ipsw): return regs->ipsw;
	case RI(cr27): return regs->cr27;
	case RI(cr0): return mfctl(0);
	case RI(cr24): return mfctl(24);
	case RI(cr25): return mfctl(25);
	case RI(cr26): return mfctl(26);
	case RI(cr28): return mfctl(28);
	case RI(cr29): return mfctl(29);
	case RI(cr30): return mfctl(30);
	case RI(cr31): return mfctl(31);
	case RI(cr8): return mfctl(8);
	case RI(cr9): return mfctl(9);
	case RI(cr12): return mfctl(12);
	case RI(cr13): return mfctl(13);
	case RI(cr10): return mfctl(10);
	case RI(cr15): return mfctl(15);
	default: return 0;
	}
}
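
/* Note that the cr* cases above (other than cr27) are read live via mfctl()
 * at the time of the regset read, i.e. they reflect the CPU handling the
 * request rather than state saved in the traced task's pt_regs.
 */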

static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
	switch (num) {
	case RI(gr[0]):	/*
			 * PSW is in gr[0].
			 * Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			val &= USER_PSW_BITS;
			regs->gr[0] &= ~USER_PSW_BITS;
			regs->gr[0] |= val;
			return;
	case RI(gr[1]) ... RI(gr[31]):
			regs->gr[num - RI(gr[0])] = val;
			return;
	case RI(iaoq[0]):
	case RI(iaoq[1]):
			/* set 2 lowest bits to ensure userspace privilege: */
			regs->iaoq[num - RI(iaoq[0])] = val | 3;
			return;
	case RI(sar):	regs->sar = val;
			return;
	default:	return;
#if 0
	/* do not allow changing any of the following registers (yet) */
	case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]): return regs->iasq[0];
	case RI(iasq[1]): return regs->iasq[1];
	case RI(iir): return regs->iir;
	case RI(isr): return regs->isr;
	case RI(ior): return regs->ior;
	case RI(ipsw): return regs->ipsw;
	case RI(cr27): return regs->cr27;
	case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
	case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
	}
}

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	unsigned long *k = kbuf;
	unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user(get_reg(regs, pos++), u++))
				return -EFAULT;
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const unsigned long *k = kbuf;
	const unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

static const struct user_regset native_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_native_view = {
	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
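
/*
 * These regsets also back the generic PTRACE_GETREGSET/PTRACE_SETREGSET
 * requests.  Illustrative user-space sketch for reading the general
 * registers through that interface:
 *
 *	struct user_regs_struct gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */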

#ifdef CONFIG_64BIT
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the 32bit native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_compat_view = {
	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
	if (is_compat_task())
		return &user_parisc_compat_view;
#endif
	return &user_parisc_native_view;
}


/* HAVE_REGS_AND_STACK_ACCESS_API feature */

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_INDEX(gr,0),
	REG_OFFSET_INDEX(gr,1),
	REG_OFFSET_INDEX(gr,2),
	REG_OFFSET_INDEX(gr,3),
	REG_OFFSET_INDEX(gr,4),
	REG_OFFSET_INDEX(gr,5),
	REG_OFFSET_INDEX(gr,6),
	REG_OFFSET_INDEX(gr,7),
	REG_OFFSET_INDEX(gr,8),
	REG_OFFSET_INDEX(gr,9),
	REG_OFFSET_INDEX(gr,10),
	REG_OFFSET_INDEX(gr,11),
	REG_OFFSET_INDEX(gr,12),
	REG_OFFSET_INDEX(gr,13),
	REG_OFFSET_INDEX(gr,14),
	REG_OFFSET_INDEX(gr,15),
	REG_OFFSET_INDEX(gr,16),
	REG_OFFSET_INDEX(gr,17),
	REG_OFFSET_INDEX(gr,18),
	REG_OFFSET_INDEX(gr,19),
	REG_OFFSET_INDEX(gr,20),
	REG_OFFSET_INDEX(gr,21),
	REG_OFFSET_INDEX(gr,22),
	REG_OFFSET_INDEX(gr,23),
	REG_OFFSET_INDEX(gr,24),
	REG_OFFSET_INDEX(gr,25),
	REG_OFFSET_INDEX(gr,26),
	REG_OFFSET_INDEX(gr,27),
	REG_OFFSET_INDEX(gr,28),
	REG_OFFSET_INDEX(gr,29),
	REG_OFFSET_INDEX(gr,30),
	REG_OFFSET_INDEX(gr,31),
	REG_OFFSET_INDEX(sr,0),
	REG_OFFSET_INDEX(sr,1),
	REG_OFFSET_INDEX(sr,2),
	REG_OFFSET_INDEX(sr,3),
	REG_OFFSET_INDEX(sr,4),
	REG_OFFSET_INDEX(sr,5),
	REG_OFFSET_INDEX(sr,6),
	REG_OFFSET_INDEX(sr,7),
	REG_OFFSET_INDEX(iasq,0),
	REG_OFFSET_INDEX(iasq,1),
	REG_OFFSET_INDEX(iaoq,0),
	REG_OFFSET_INDEX(iaoq,1),
	REG_OFFSET_NAME(cr27),
	REG_OFFSET_NAME(ksp),
	REG_OFFSET_NAME(kpc),
	REG_OFFSET_NAME(sar),
	REG_OFFSET_NAME(iir),
	REG_OFFSET_NAME(isr),
	REG_OFFSET_NAME(ior),
	REG_OFFSET_NAME(ipsw),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name.  If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs.  If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s).  If @addr is within the kernel stack, it returns true.  If not,
 * returns false.
 */
int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs.  If the @n-th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr -= n;

	if (!regs_within_kernel_stack(regs, (unsigned long)addr))
		return 0;

	return *addr;
}
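
/*
 * Illustrative use of the accessors above, e.g. by the kprobe tracing code
 * when it resolves register-name and stack-slot fetch arguments:
 *
 *	int off = regs_query_register_offset("gr26");	// offsetof(struct pt_regs, gr[26])
 *	unsigned long slot2 = regs_get_kernel_stack_nth(regs, 2);
 */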