/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
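/*
 * Editor's note (illustrative, not part of the original source):
 * GET_BITS() above relies on ia64_unat_pos() to find where the UNaT
 * bit for a given scratch register was saved.  For example, if the
 * UNaT bit for r1 were saved at bit position 10 (a hypothetical
 * layout), dist would be 9 and the rotate-right would move it to bit 1
 * of the returned NaT bitset, i.e. the bit index equals the register
 * number.  PUT_BITS() below is the exact inverse: a rotate-left by the
 * same distance.
 */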
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
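/*
 * Editor's note (illustrative, not part of the original source): an
 * IA-64 bundle is 16 bytes wide and holds three instruction slots, so
 * the (cr_iip, psr.ri) pair advances 0 -> 1 -> 2 within a bundle and
 * then wraps to slot 0 of the next bundle.  For example, stepping
 * forward from slot 1 of an MLX-template bundle at a hypothetical
 * address 0x4000 lands directly on (0x4010, slot 0) in the code above,
 * because slot 2 of an MLX bundle cannot be resumed via rfi.
 */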
/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 |				\
 *	+--------+				|
 *	| slot01 |				> child_regs->ar_rnat
 *	+--------+				|
 *	| slot02 |				/	kernel rbs
 *	+--------+			+--------+
 *		<- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +			+--------+
 *					| slot62 |
 *	+- - - - +			+--------+
 *					|  rnat  |
 *	+- - - - +			+--------+
 *	  vrnat				| slot00 |
 *	+- - - - +			+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
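/*
 * Editor's note (illustrative, not part of the original source): the
 * RSE interleaves one NaT-collection word after every 63 register
 * slots, so a collection slot is the one whose address has bits 3..8
 * all set (addr & 0x1f8 == 0x1f8, the test ia64_rse_is_rnat_slot()
 * performs).  Because the user and kernel backing stores are generally
 * not aligned to the same 64-slot boundary, one user collection word
 * can straddle two kernel collection words, which is why get_rnat()
 * and put_rnat() work with an rnat0/rnat1 pair plus pt->ar_rnat.
 */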
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
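/*
 * Editor's note (illustrative, not part of the original source):
 * pt->loadrs carries the RSE "dirty bytes << 16" value, so
 * (pt->loadrs >> 19) is the number of 64-bit slots (NaT-collection
 * slots included) that the kernel spilled on entry, and
 * ia64_rse_num_regs() converts that back into a register count.  On a
 * syscall entry the current frame (the first cfm & 0x7f registers,
 * i.e. the syscall arguments) was never "covered", which is why it is
 * added to ndirty above.
 */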
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * While a thread is stopped (ptraced), the debugger may change the
 * thread's user stack by writing its memory directly, and we must not
 * let the RSE state held in the kernel overwrite that user stack (the
 * user-space copy is the newer one in that case).  To work around this,
 * we copy the kernel RBS out to the user RBS before the task stops, so
 * the user RBS holds up-to-date data.  After the task is resumed from
 * the traced stop, we copy the user RBS back into the kernel, so the
 * kernel returns to user space with the newer state.  TIF_RESTORE_RSE
 * is the flag indicating that the user RBS still needs to be
 * synchronized back into the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace:unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}

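/*
 * Editor's note: a hedged usage sketch (not part of this file) of how a
 * debugger drives the requests handled above from user space; the pid
 * variable and error handling are assumptions for illustration only.
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace_offsets.h>
 *
 *	errno = 0;
 *	long iip = ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0);
 *	if (iip == -1 && errno)
 *		perror("PTRACE_PEEKUSR");
 *
 * PT_CR_IIP comes from <asm/ptrace_offsets.h>; because peek requests
 * return the data word itself, force_successful_syscall_return() above
 * keeps a legitimate negative value from being mistaken for an errno.
 */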
/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();


	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */


	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0,
						    ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target =
				      target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned int pos = -1;	/* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ...
	(PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon.  This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset.  The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
			sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
			sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}