// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
 * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/regset.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * These are our native regset flavors.
 */
enum parisc_regset {
	REGSET_GENERAL,
	REGSET_FP
};

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* make sure the trap bits are not set */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}
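
/*
 * Reader's note (summary added here, not quoted from the architecture
 * manual): the PSW fields poked above and in the step helpers below are
 * the per-task trap controls, roughly r = recovery counter enable (used
 * for single stepping), t = taken-branch trap enable (used for block
 * stepping) and h/l = higher/lower-privilege transfer trap enables;
 * ptrace_disable() clears all of them so a detached task runs with no
 * step traps armed.
 */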

/*
 * The following functions are called by ptrace_resume() when
 * enabling or disabling single/block tracing.
 */
void user_disable_single_step(struct task_struct *task)
{
	ptrace_disable(task);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	set_tsk_thread_flag(task, TIF_SINGLESTEP);

	if (pa_psw(task)->n) {
		/* Nullified, just crank over the queue. */
		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
		pa_psw(task)->n = 0;
		pa_psw(task)->x = 0;
		pa_psw(task)->y = 0;
		pa_psw(task)->z = 0;
		pa_psw(task)->b = 0;
		ptrace_disable(task);
		/* Don't wake up the task, but let the
		   parent know something happened. */
		force_sig_fault(SIGTRAP, TRAP_TRACE,
				(void __user *) (task_regs(task)->iaoq[0] & ~3),
				task);
		/* notify_parent(task, SIGCHLD); */
		return;
	}

	/* Enable recovery counter traps.  The recovery counter
	 * itself will be set to zero on a task switch.  If the
	 * task is suspended on a syscall then the syscall return
	 * path will overwrite the recovery counter with a suitable
	 * value such that it traps once back in user space.  We
	 * disable interrupts in the tasks PSW here also, to avoid
	 * interrupts while the recovery counter is decrementing.
	 */
	pa_psw(task)->r = 1;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

void user_enable_block_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	set_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* Enable taken branch trap. */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 1;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	unsigned long tmp;
	long ret = -EIO;

	switch (request) {

	/* Read the word at location addr in the USER area.  For ptraced
	   processes, the kernel saves all regs on a syscall. */
	case PTRACE_PEEKUSR:
		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, datap);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			data &= USER_PSW_BITS;
			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
			task_regs(child)->gr[0] |= data;
			ret = 0;
			break;
		}

		if ((addr & (sizeof(unsigned long)-1)) ||
		     addr >= sizeof(struct pt_regs))
			break;
		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
				addr == PT_SAR) {
			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_fp_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_fp_struct),
					     datap);

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
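
/*
 * For orientation (a sketch only, not compiled here): a native debugger
 * exercises the handler above roughly as follows, where PT_GR3 and PT_PSW
 * stand for the byte offsets of gr[3] and gr[0] within struct pt_regs
 * (the same asm-offsets constants used in this file; a userspace tool has
 * to provide equivalent offsets itself):
 *
 *	errno = 0;
 *	long gr3 = ptrace(PTRACE_PEEKUSR, pid, (void *)PT_GR3, NULL);
 *	if (gr3 == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 *	ptrace(PTRACE_POKEUSR, pid, (void *)PT_PSW, (void *)new_psw_bits);
 *
 * Only the USER_PSW_BITS of new_psw_bits survive the PSW write, as the
 * PTRACE_POKEUSR case above enforces.
 */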

#ifdef CONFIG_COMPAT

/* This function is needed to translate 32 bit pt_regs offsets in to
 * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
 * will request offset 12 if it wants gr3, but the lower 32 bits of
 * the 64 bit kernels view of gr3 will be at offset 28 (3*8 + 4).
 * This code relies on a 32 bit pt_regs being comprised of 32 bit values
 * except for the fp registers which (a) are 64 bits, and (b) follow
 * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
 * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
 * being 64 bit in both cases.
 */

static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
	if (offset < 0)
		return sizeof(struct pt_regs);
	else if (offset <= 32*4)	/* gr[0..31] */
		return offset * 2 + 4;
	else if (offset <= 32*4+32*8)	/* gr[0..31] + fr[0..31] */
		return offset + 32*4;
	else if (offset < sizeof(struct pt_regs)/2 + 32*4)
		return offset * 2 + 4 - 32*8;
	else
		return sizeof(struct pt_regs);
}
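
/*
 * Worked examples of the mapping above (added for clarity): a 32-bit
 * debugger asking for gr3 passes offset 3*4 = 12, which translates to
 * 12*2 + 4 = 28, i.e. the low word of the 64-bit gr[3] slot (3*8 + 4 on
 * this big-endian layout).  fr0 sits at 32*4 = 128 in the 32-bit view and
 * translates to 128 + 32*4 = 256, the start of fr[] in the 64-bit
 * pt_regs, since the FP registers are 64 bit in both views.
 */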

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	compat_uint_t tmp;
	long ret = -EIO;

	switch (request) {

	case PTRACE_PEEKUSR:
		if (addr & (sizeof(compat_uint_t)-1))
			break;
		addr = translate_usr_offset(addr);
		if (addr >= sizeof(struct pt_regs))
			break;

		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Since PT_PSW==0, it is valid for 32 bit processes
			 * under 64 bit kernels as well.
			 */
			ret = arch_ptrace(child, request, addr, data);
		} else {
			if (addr & (sizeof(compat_uint_t)-1))
				break;
			addr = translate_usr_offset(addr);
			if (addr >= sizeof(struct pt_regs))
				break;
			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
				/* Special case, fp regs are 64 bits anyway */
				*(__u64 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
					addr == PT_SAR+4) {
				/* Zero the top 32 bits */
				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
		}
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif

long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->gr[20] = -1UL;
		goto out;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL) == -1)
		return -1;

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gr[20]);
#endif

#ifdef CONFIG_64BIT
	if (!is_compat_task())
		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
				    regs->gr[24], regs->gr[23]);
	else
#endif
		audit_syscall_entry(regs->gr[20] & 0xffffffff,
			regs->gr[26] & 0xffffffff,
			regs->gr[25] & 0xffffffff,
			regs->gr[24] & 0xffffffff,
			regs->gr[23] & 0xffffffff);

out:
	/*
	 * Sign extend the syscall number to 64bit since it may have been
	 * modified by a compat ptrace call
	 */
	return (int) ((u32) regs->gr[20]);
}

void do_syscall_trace_exit(struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
		test_thread_flag(TIF_BLOCKSTEP);

	audit_syscall_exit(regs);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gr[20]);
#endif

	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, stepping);
}
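
/*
 * For orientation (a sketch only, not compiled here): a PTRACE_SYSCALL
 * based tracer funnels through the two hooks above, once at syscall entry
 * and once at exit.  On parisc the syscall number lives in gr20 and the
 * leading arguments in gr26..gr23, as the audit calls in
 * do_syscall_trace_enter() show, so such a tracer might do:
 *
 *	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);			(entry stop)
 *	nr = ptrace(PTRACE_PEEKUSR, pid, (void *)PT_GR20, NULL);
 *	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);			(exit stop)
 *
 * where PT_GR20 again stands for the byte offset of gr[20] in pt_regs.
 */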


/*
 * regset functions.
 */

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	__u64 *k = kbuf;
	__u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			*k++ = regs->fr[pos++];
	else
		for (; count > 0 && pos < ELF_NFPREG; --count)
			if (__put_user(regs->fr[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NFPREG * sizeof(reg), -1);
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const __u64 *k = kbuf;
	const __u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			regs->fr[pos++] = *k++;
	else
		for (; count > 0 && pos < ELF_NFPREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs->fr[pos++] = reg;
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NFPREG * sizeof(reg), -1);
}

#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))

static unsigned long get_reg(struct pt_regs *regs, int num)
{
	switch (num) {
	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iaoq[0]):		return regs->iaoq[0];
	case RI(iaoq[1]):		return regs->iaoq[1];
	case RI(sar):			return regs->sar;
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case RI(cr0):			return mfctl(0);
	case RI(cr24):			return mfctl(24);
	case RI(cr25):			return mfctl(25);
	case RI(cr26):			return mfctl(26);
	case RI(cr28):			return mfctl(28);
	case RI(cr29):			return mfctl(29);
	case RI(cr30):			return mfctl(30);
	case RI(cr31):			return mfctl(31);
	case RI(cr8):			return mfctl(8);
	case RI(cr9):			return mfctl(9);
	case RI(cr12):			return mfctl(12);
	case RI(cr13):			return mfctl(13);
	case RI(cr10):			return mfctl(10);
	case RI(cr15):			return mfctl(15);
	default:			return 0;
	}
}

static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
	switch (num) {
	case RI(gr[0]): /*
			 * PSW is in gr[0].
			 * Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			val &= USER_PSW_BITS;
			regs->gr[0] &= ~USER_PSW_BITS;
			regs->gr[0] |= val;
			return;
	case RI(gr[1]) ... RI(gr[31]):
			regs->gr[num - RI(gr[0])] = val;
			return;
	case RI(iaoq[0]):
	case RI(iaoq[1]):
			regs->iaoq[num - RI(iaoq[0])] = val;
			return;
	case RI(sar):	regs->sar = val;
			return;
	default:	return;
#if 0
	/* do not allow to change any of the following registers (yet) */
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
	case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
	}
}

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	unsigned long *k = kbuf;
	unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user(get_reg(regs, pos++), u++))
				return -EFAULT;
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const unsigned long *k = kbuf;
	const unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

static const struct user_regset native_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_native_view = {
	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_64BIT
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the 32bit native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_compat_view = {
	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
	if (is_compat_task())
		return &user_parisc_compat_view;
#endif
	return &user_parisc_native_view;
}
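
/*
 * Sketch (for readers, not compiled here): these regset views are what
 * PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core dumps operate on.  A
 * debugger would typically do something like:
 *
 *	struct user_regs_struct gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * which lands in gpr_get() or gpr32_get() depending on which view
 * task_user_regset_view() selects.
 */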


/* HAVE_REGS_AND_STACK_ACCESS_API feature */

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_INDEX(gr,0),
	REG_OFFSET_INDEX(gr,1),
	REG_OFFSET_INDEX(gr,2),
	REG_OFFSET_INDEX(gr,3),
	REG_OFFSET_INDEX(gr,4),
	REG_OFFSET_INDEX(gr,5),
	REG_OFFSET_INDEX(gr,6),
	REG_OFFSET_INDEX(gr,7),
	REG_OFFSET_INDEX(gr,8),
	REG_OFFSET_INDEX(gr,9),
	REG_OFFSET_INDEX(gr,10),
	REG_OFFSET_INDEX(gr,11),
	REG_OFFSET_INDEX(gr,12),
	REG_OFFSET_INDEX(gr,13),
	REG_OFFSET_INDEX(gr,14),
	REG_OFFSET_INDEX(gr,15),
	REG_OFFSET_INDEX(gr,16),
	REG_OFFSET_INDEX(gr,17),
	REG_OFFSET_INDEX(gr,18),
	REG_OFFSET_INDEX(gr,19),
	REG_OFFSET_INDEX(gr,20),
	REG_OFFSET_INDEX(gr,21),
	REG_OFFSET_INDEX(gr,22),
	REG_OFFSET_INDEX(gr,23),
	REG_OFFSET_INDEX(gr,24),
	REG_OFFSET_INDEX(gr,25),
	REG_OFFSET_INDEX(gr,26),
	REG_OFFSET_INDEX(gr,27),
	REG_OFFSET_INDEX(gr,28),
	REG_OFFSET_INDEX(gr,29),
	REG_OFFSET_INDEX(gr,30),
	REG_OFFSET_INDEX(gr,31),
	REG_OFFSET_INDEX(sr,0),
	REG_OFFSET_INDEX(sr,1),
	REG_OFFSET_INDEX(sr,2),
	REG_OFFSET_INDEX(sr,3),
	REG_OFFSET_INDEX(sr,4),
	REG_OFFSET_INDEX(sr,5),
	REG_OFFSET_INDEX(sr,6),
	REG_OFFSET_INDEX(sr,7),
	REG_OFFSET_INDEX(iasq,0),
	REG_OFFSET_INDEX(iasq,1),
	REG_OFFSET_INDEX(iaoq,0),
	REG_OFFSET_INDEX(iaoq,1),
	REG_OFFSET_NAME(cr27),
	REG_OFFSET_NAME(ksp),
	REG_OFFSET_NAME(kpc),
	REG_OFFSET_NAME(sar),
	REG_OFFSET_NAME(iir),
	REG_OFFSET_NAME(isr),
	REG_OFFSET_NAME(ior),
	REG_OFFSET_NAME(ipsw),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset: the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
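
/*
 * Examples (added for clarity): given the table above,
 * regs_query_register_offset("gr26") returns offsetof(struct pt_regs, gr[26]),
 * and regs_query_register_name(offsetof(struct pt_regs, sar)) returns "sar".
 * These helpers back the HAVE_REGS_AND_STACK_ACCESS_API users, e.g. the
 * register-name parsing done by kprobe trace events.
 */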