/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
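/*
 * Worked example for GET_BITS (the numbers are illustrative, not the
 * actual pt_regs layout): suppose ia64_unat_pos(&pt->r8) == 50.  Then
 * GET_BITS(8, 11, scratch_unat) computes dist = 50 - 8 = 42 and
 * rotates the four NaT bits stored at positions 50..53 of scratch_unat
 * down to positions 8..11 before masking with MASK(4) << 8.  PUT_BITS
 * below performs the inverse rotation.
 */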
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 | \
 *	+--------+ |
 *	| slot01 | > child_regs->ar_rnat
 *	+--------+ |
 *	| slot02 | /				kernel rbs
 *	+--------+			+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +			+--------+
 *					| slot62 |
 *	+- - - - +			+--------+
 *					|  rnat  |
 *	+- - - - +			+--------+
 *	  vrnat				| slot00 |
 *	+- - - - +			+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
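 *
 * A worked example (numbers illustrative): if slot 0 of the user
 * rnat word maps to bit 40 of a kernel rnat word (shift == 40),
 * then user bits 0..22 come from bits 40..62 of rnat0 (merged as
 * (rnat0 & m) >> 40) and user bits 23..62 come from bits 0..39 of
 * rnat1 (merged as (rnat1 & m) << 23), which is exactly the pair
 * of merge steps performed by get_rnat() below.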
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backing store and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backing store and
	 * then computing the equivalent address in kernel space.
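	 *
	 * E.g. (illustrative): if urnat_addr + 1 is 10 register
	 * slots beyond ubspstore, then num_regs == 10, slot0_kaddr
	 * is 10 register slots past krbs, and
	 * ia64_rse_slot_num(slot0_kaddr) yields the shift used for
	 * the two merge steps below.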
344 */ 345 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); 346 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); 347 shift = ia64_rse_slot_num(slot0_kaddr); 348 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); 349 rnat0_kaddr = rnat1_kaddr - 64; 350 351 if (ubspstore + 63 > urnat_addr) { 352 /* some bits need to be place in pt->ar_rnat: */ 353 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; 354 pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask); 355 mask &= ~umask; 356 if (!mask) 357 return; 358 } 359 /* 360 * Note: Section 11.1 of the EAS guarantees that bit 63 of an 361 * rnat slot is ignored. so we don't have to clear it here. 362 */ 363 rnat0 = (urnat << shift); 364 m = mask << shift; 365 if (rnat0_kaddr >= kbsp) 366 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m); 367 else if (rnat0_kaddr > krbs) 368 *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m)); 369 370 rnat1 = (urnat >> (63 - shift)); 371 m = mask >> (63 - shift); 372 if (rnat1_kaddr >= kbsp) 373 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m); 374 else if (rnat1_kaddr > krbs) 375 *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m)); 376 } 377 378 static inline int 379 on_kernel_rbs (unsigned long addr, unsigned long bspstore, 380 unsigned long urbs_end) 381 { 382 unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *) 383 urbs_end); 384 return (addr >= bspstore && addr <= (unsigned long) rnat_addr); 385 } 386 387 /* 388 * Read a word from the user-level backing store of task CHILD. ADDR 389 * is the user-level address to read the word from, VAL a pointer to 390 * the return value, and USER_BSP gives the end of the user-level 391 * backing store (i.e., it's the address that would be in ar.bsp after 392 * the user executed a "cover" instruction). 393 * 394 * This routine takes care of accessing the kernel register backing 395 * store for those registers that got spilled there. It also takes 396 * care of calculating the appropriate RNaT collection words. 397 */ 398 long 399 ia64_peek (struct task_struct *child, struct switch_stack *child_stack, 400 unsigned long user_rbs_end, unsigned long addr, long *val) 401 { 402 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr; 403 struct pt_regs *child_regs; 404 size_t copied; 405 long ret; 406 407 urbs_end = (long *) user_rbs_end; 408 laddr = (unsigned long *) addr; 409 child_regs = task_pt_regs(child); 410 bspstore = (unsigned long *) child_regs->ar_bspstore; 411 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; 412 if (on_kernel_rbs(addr, (unsigned long) bspstore, 413 (unsigned long) urbs_end)) 414 { 415 /* 416 * Attempt to read the RBS in an area that's actually 417 * on the kernel RBS => read the corresponding bits in 418 * the kernel RBS. 419 */ 420 rnat_addr = ia64_rse_rnat_addr(laddr); 421 ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end); 422 423 if (laddr == rnat_addr) { 424 /* return NaT collection word itself */ 425 *val = ret; 426 return 0; 427 } 428 429 if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) { 430 /* 431 * It is implementation dependent whether the 432 * data portion of a NaT value gets saved on a 433 * st8.spill or RSE spill (e.g., see EAS 2.6, 434 * 4.4.4.6 Register Spill and Fill). To get 435 * consistent behavior across all possible 436 * IA-64 implementations, we return zero in 437 * this case. 438 */ 439 *val = 0; 440 return 0; 441 } 442 443 if (laddr < urbs_end) { 444 /* 445 * The desired word is on the kernel RBS and 446 * is not a NaT. 
447 */ 448 regnum = ia64_rse_num_regs(bspstore, laddr); 449 *val = *ia64_rse_skip_regs(krbs, regnum); 450 return 0; 451 } 452 } 453 copied = access_process_vm(child, addr, &ret, sizeof(ret), 0); 454 if (copied != sizeof(ret)) 455 return -EIO; 456 *val = ret; 457 return 0; 458 } 459 460 long 461 ia64_poke (struct task_struct *child, struct switch_stack *child_stack, 462 unsigned long user_rbs_end, unsigned long addr, long val) 463 { 464 unsigned long *bspstore, *krbs, regnum, *laddr; 465 unsigned long *urbs_end = (long *) user_rbs_end; 466 struct pt_regs *child_regs; 467 468 laddr = (unsigned long *) addr; 469 child_regs = task_pt_regs(child); 470 bspstore = (unsigned long *) child_regs->ar_bspstore; 471 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; 472 if (on_kernel_rbs(addr, (unsigned long) bspstore, 473 (unsigned long) urbs_end)) 474 { 475 /* 476 * Attempt to write the RBS in an area that's actually 477 * on the kernel RBS => write the corresponding bits 478 * in the kernel RBS. 479 */ 480 if (ia64_rse_is_rnat_slot(laddr)) 481 put_rnat(child, child_stack, krbs, laddr, val, 482 urbs_end); 483 else { 484 if (laddr < urbs_end) { 485 regnum = ia64_rse_num_regs(bspstore, laddr); 486 *ia64_rse_skip_regs(krbs, regnum) = val; 487 } 488 } 489 } else if (access_process_vm(child, addr, &val, sizeof(val), 1) 490 != sizeof(val)) 491 return -EIO; 492 return 0; 493 } 494 495 /* 496 * Calculate the address of the end of the user-level register backing 497 * store. This is the address that would have been stored in ar.bsp 498 * if the user had executed a "cover" instruction right before 499 * entering the kernel. If CFMP is not NULL, it is used to return the 500 * "current frame mask" that was active at the time the kernel was 501 * entered. 502 */ 503 unsigned long 504 ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, 505 unsigned long *cfmp) 506 { 507 unsigned long *krbs, *bspstore, cfm = pt->cr_ifs; 508 long ndirty; 509 510 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; 511 bspstore = (unsigned long *) pt->ar_bspstore; 512 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); 513 514 if (in_syscall(pt)) 515 ndirty += (cfm & 0x7f); 516 else 517 cfm &= ~(1UL << 63); /* clear valid bit */ 518 519 if (cfmp) 520 *cfmp = cfm; 521 return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); 522 } 523 524 /* 525 * Synchronize (i.e, write) the RSE backing store living in kernel 526 * space to the VM of the CHILD task. SW and PT are the pointers to 527 * the switch_stack and pt_regs structures, respectively. 528 * USER_RBS_END is the user-level address at which the backing store 529 * ends. 
530 */ 531 long 532 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw, 533 unsigned long user_rbs_start, unsigned long user_rbs_end) 534 { 535 unsigned long addr, val; 536 long ret; 537 538 /* now copy word for word from kernel rbs to user rbs: */ 539 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { 540 ret = ia64_peek(child, sw, user_rbs_end, addr, &val); 541 if (ret < 0) 542 return ret; 543 if (access_process_vm(child, addr, &val, sizeof(val), 1) 544 != sizeof(val)) 545 return -EIO; 546 } 547 return 0; 548 } 549 550 static long 551 ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw, 552 unsigned long user_rbs_start, unsigned long user_rbs_end) 553 { 554 unsigned long addr, val; 555 long ret; 556 557 /* now copy word for word from user rbs to kernel rbs: */ 558 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { 559 if (access_process_vm(child, addr, &val, sizeof(val), 0) 560 != sizeof(val)) 561 return -EIO; 562 563 ret = ia64_poke(child, sw, user_rbs_end, addr, val); 564 if (ret < 0) 565 return ret; 566 } 567 return 0; 568 } 569 570 typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *, 571 unsigned long, unsigned long); 572 573 static void do_sync_rbs(struct unw_frame_info *info, void *arg) 574 { 575 struct pt_regs *pt; 576 unsigned long urbs_end; 577 syncfunc_t fn = arg; 578 579 if (unw_unwind_to_user(info) < 0) 580 return; 581 pt = task_pt_regs(info->task); 582 urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL); 583 584 fn(info->task, info->sw, pt->ar_bspstore, urbs_end); 585 } 586 587 /* 588 * when a thread is stopped (ptraced), debugger might change thread's user 589 * stack (change memory directly), and we must avoid the RSE stored in kernel 590 * to override user stack (user space's RSE is newer than kernel's in the 591 * case). To workaround the issue, we copy kernel RSE to user RSE before the 592 * task is stopped, so user RSE has updated data. we then copy user RSE to 593 * kernel after the task is resummed from traced stop and kernel will use the 594 * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need 595 * synchronize user RSE to kernel. 596 */ 597 void ia64_ptrace_stop(void) 598 { 599 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) 600 return; 601 tsk_set_notify_resume(current); 602 unw_init_running(do_sync_rbs, ia64_sync_user_rbs); 603 } 604 605 /* 606 * This is called to read back the register backing store. 607 */ 608 void ia64_sync_krbs(void) 609 { 610 clear_tsk_thread_flag(current, TIF_RESTORE_RSE); 611 tsk_clear_notify_resume(current); 612 613 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); 614 } 615 616 /* 617 * After PTRACE_ATTACH, a thread's register backing store area in user 618 * space is assumed to contain correct data whenever the thread is 619 * stopped. arch_ptrace_stop takes care of this on tracing stops. 620 * But if the child was already stopped for job control when we attach 621 * to it, then it might not ever get into ptrace_stop by the time we 622 * want to examine the user memory containing the RBS. 623 */ 624 void 625 ptrace_attach_sync_user_rbs (struct task_struct *child) 626 { 627 int stopped = 0; 628 struct unw_frame_info info; 629 630 /* 631 * If the child is in TASK_STOPPED, we need to change that to 632 * TASK_TRACED momentarily while we operate on it. This ensures 633 * that the child won't be woken up and return to user mode while 634 * we are doing the sync. (It can only be woken up for SIGKILL.) 
635 */ 636 637 read_lock(&tasklist_lock); 638 if (child->signal) { 639 spin_lock_irq(&child->sighand->siglock); 640 if (child->state == TASK_STOPPED && 641 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { 642 tsk_set_notify_resume(child); 643 644 child->state = TASK_TRACED; 645 stopped = 1; 646 } 647 spin_unlock_irq(&child->sighand->siglock); 648 } 649 read_unlock(&tasklist_lock); 650 651 if (!stopped) 652 return; 653 654 unw_init_from_blocked_task(&info, child); 655 do_sync_rbs(&info, ia64_sync_user_rbs); 656 657 /* 658 * Now move the child back into TASK_STOPPED if it should be in a 659 * job control stop, so that SIGCONT can be used to wake it up. 660 */ 661 read_lock(&tasklist_lock); 662 if (child->signal) { 663 spin_lock_irq(&child->sighand->siglock); 664 if (child->state == TASK_TRACED && 665 (child->signal->flags & SIGNAL_STOP_STOPPED)) { 666 child->state = TASK_STOPPED; 667 } 668 spin_unlock_irq(&child->sighand->siglock); 669 } 670 read_unlock(&tasklist_lock); 671 } 672 673 static inline int 674 thread_matches (struct task_struct *thread, unsigned long addr) 675 { 676 unsigned long thread_rbs_end; 677 struct pt_regs *thread_regs; 678 679 if (ptrace_check_attach(thread, 0) < 0) 680 /* 681 * If the thread is not in an attachable state, we'll 682 * ignore it. The net effect is that if ADDR happens 683 * to overlap with the portion of the thread's 684 * register backing store that is currently residing 685 * on the thread's kernel stack, then ptrace() may end 686 * up accessing a stale value. But if the thread 687 * isn't stopped, that's a problem anyhow, so we're 688 * doing as well as we can... 689 */ 690 return 0; 691 692 thread_regs = task_pt_regs(thread); 693 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); 694 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) 695 return 0; 696 697 return 1; /* looks like we've got a winner */ 698 } 699 700 /* 701 * Write f32-f127 back to task->thread.fph if it has been modified. 702 */ 703 inline void 704 ia64_flush_fph (struct task_struct *task) 705 { 706 struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); 707 708 /* 709 * Prevent migrating this task while 710 * we're fiddling with the FPU state 711 */ 712 preempt_disable(); 713 if (ia64_is_local_fpu_owner(task) && psr->mfh) { 714 psr->mfh = 0; 715 task->thread.flags |= IA64_THREAD_FPH_VALID; 716 ia64_save_fpu(&task->thread.fph[0]); 717 } 718 preempt_enable(); 719 } 720 721 /* 722 * Sync the fph state of the task so that it can be manipulated 723 * through thread.fph. If necessary, f32-f127 are written back to 724 * thread.fph or, if the fph state hasn't been used before, thread.fph 725 * is cleared to zeroes. Also, access to f32-f127 is disabled to 726 * ensure that the task picks up the state from thread.fph when it 727 * executes again. 
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}

/*
 * Change the machine-state of CHILD such that it will return via the
 * normal kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr(&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |= (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
814 */ 815 pt->r2 = 0; 816 pt->r3 = 0; 817 pt->r14 = 0; 818 memset(&pt->r16, 0, 16*8); /* clear r16-r31 */ 819 memset(&pt->f6, 0, 6*16); /* clear f6-f11 */ 820 pt->b7 = 0; 821 pt->ar_ccv = 0; 822 pt->ar_csd = 0; 823 pt->ar_ssd = 0; 824 } 825 826 static int 827 access_nat_bits (struct task_struct *child, struct pt_regs *pt, 828 struct unw_frame_info *info, 829 unsigned long *data, int write_access) 830 { 831 unsigned long regnum, nat_bits, scratch_unat, dummy = 0; 832 char nat = 0; 833 834 if (write_access) { 835 nat_bits = *data; 836 scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits); 837 if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) { 838 dprintk("ptrace: failed to set ar.unat\n"); 839 return -1; 840 } 841 for (regnum = 4; regnum <= 7; ++regnum) { 842 unw_get_gr(info, regnum, &dummy, &nat); 843 unw_set_gr(info, regnum, dummy, 844 (nat_bits >> regnum) & 1); 845 } 846 } else { 847 if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) { 848 dprintk("ptrace: failed to read ar.unat\n"); 849 return -1; 850 } 851 nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat); 852 for (regnum = 4; regnum <= 7; ++regnum) { 853 unw_get_gr(info, regnum, &dummy, &nat); 854 nat_bits |= (nat != 0) << regnum; 855 } 856 *data = nat_bits; 857 } 858 return 0; 859 } 860 861 static int 862 access_uarea (struct task_struct *child, unsigned long addr, 863 unsigned long *data, int write_access) 864 { 865 unsigned long *ptr, regnum, urbs_end, cfm; 866 struct switch_stack *sw; 867 struct pt_regs *pt; 868 # define pt_reg_addr(pt, reg) ((void *) \ 869 ((unsigned long) (pt) \ 870 + offsetof(struct pt_regs, reg))) 871 872 873 pt = task_pt_regs(child); 874 sw = (struct switch_stack *) (child->thread.ksp + 16); 875 876 if ((addr & 0x7) != 0) { 877 dprintk("ptrace: unaligned register address 0x%lx\n", addr); 878 return -1; 879 } 880 881 if (addr < PT_F127 + 16) { 882 /* accessing fph */ 883 if (write_access) 884 ia64_sync_fph(child); 885 else 886 ia64_flush_fph(child); 887 ptr = (unsigned long *) 888 ((unsigned long) &child->thread.fph + addr); 889 } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) { 890 /* scratch registers untouched by kernel (saved in pt_regs) */ 891 ptr = pt_reg_addr(pt, f10) + (addr - PT_F10); 892 } else if (addr >= PT_F12 && addr < PT_F15 + 16) { 893 /* 894 * Scratch registers untouched by kernel (saved in 895 * switch_stack). 
896 */ 897 ptr = (unsigned long *) ((long) sw 898 + (addr - PT_NAT_BITS - 32)); 899 } else if (addr < PT_AR_LC + 8) { 900 /* preserved state: */ 901 struct unw_frame_info info; 902 char nat = 0; 903 int ret; 904 905 unw_init_from_blocked_task(&info, child); 906 if (unw_unwind_to_user(&info) < 0) 907 return -1; 908 909 switch (addr) { 910 case PT_NAT_BITS: 911 return access_nat_bits(child, pt, &info, 912 data, write_access); 913 914 case PT_R4: case PT_R5: case PT_R6: case PT_R7: 915 if (write_access) { 916 /* read NaT bit first: */ 917 unsigned long dummy; 918 919 ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, 920 &dummy, &nat); 921 if (ret < 0) 922 return ret; 923 } 924 return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, 925 &nat, write_access); 926 927 case PT_B1: case PT_B2: case PT_B3: 928 case PT_B4: case PT_B5: 929 return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, 930 write_access); 931 932 case PT_AR_EC: 933 return unw_access_ar(&info, UNW_AR_EC, data, 934 write_access); 935 936 case PT_AR_LC: 937 return unw_access_ar(&info, UNW_AR_LC, data, 938 write_access); 939 940 default: 941 if (addr >= PT_F2 && addr < PT_F5 + 16) 942 return access_fr(&info, (addr - PT_F2)/16 + 2, 943 (addr & 8) != 0, data, 944 write_access); 945 else if (addr >= PT_F16 && addr < PT_F31 + 16) 946 return access_fr(&info, 947 (addr - PT_F16)/16 + 16, 948 (addr & 8) != 0, 949 data, write_access); 950 else { 951 dprintk("ptrace: rejecting access to register " 952 "address 0x%lx\n", addr); 953 return -1; 954 } 955 } 956 } else if (addr < PT_F9+16) { 957 /* scratch state */ 958 switch (addr) { 959 case PT_AR_BSP: 960 /* 961 * By convention, we use PT_AR_BSP to refer to 962 * the end of the user-level backing store. 963 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) 964 * to get the real value of ar.bsp at the time 965 * the kernel was entered. 966 * 967 * Furthermore, when changing the contents of 968 * PT_AR_BSP (or PT_CFM) while the task is 969 * blocked in a system call, convert the state 970 * so that the non-system-call exit 971 * path is used. This ensures that the proper 972 * state will be picked up when resuming 973 * execution. However, it *also* means that 974 * once we write PT_AR_BSP/PT_CFM, it won't be 975 * possible to modify the syscall arguments of 976 * the pending system call any longer. This 977 * shouldn't be an issue because modifying 978 * PT_AR_BSP/PT_CFM generally implies that 979 * we're either abandoning the pending system 980 * call or that we defer it's re-execution 981 * (e.g., due to GDB doing an inferior 982 * function call). 
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		case PT_CFM:
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;

		case PT_CR_IPSR:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;

		case PT_AR_RSC:
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);	/* force PL3 */
			else
				*data = pt->ar_rsc;
			return 0;

		case PT_AR_RNAT:
			ptr = pt_reg_addr(pt, ar_rnat);
			break;
		case PT_R1:
			ptr = pt_reg_addr(pt, r1);
			break;
		case PT_R2:  case PT_R3:
			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
			break;
		case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
			break;
		case PT_R12: case PT_R13:
			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
			break;
		case PT_R14:
			ptr = pt_reg_addr(pt, r14);
			break;
		case PT_R15:
			ptr = pt_reg_addr(pt, r15);
			break;
		case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		case PT_R28: case PT_R29: case PT_R30: case PT_R31:
			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
			break;
		case PT_B0:
			ptr = pt_reg_addr(pt, b0);
			break;
		case PT_B6:
			ptr = pt_reg_addr(pt, b6);
			break;
		case PT_B7:
			ptr = pt_reg_addr(pt, b7);
			break;
		case PT_F6:  case PT_F6+8:  case PT_F7: case PT_F7+8:
		case PT_F8:  case PT_F8+8:  case PT_F9: case PT_F9+8:
			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
			break;
		case PT_AR_BSPSTORE:
			ptr = pt_reg_addr(pt, ar_bspstore);
			break;
		case PT_AR_UNAT:
			ptr = pt_reg_addr(pt, ar_unat);
			break;
		case PT_AR_PFS:
			ptr = pt_reg_addr(pt, ar_pfs);
			break;
		case PT_AR_CCV:
			ptr = pt_reg_addr(pt, ar_ccv);
			break;
		case PT_AR_FPSR:
			ptr = pt_reg_addr(pt, ar_fpsr);
			break;
		case PT_CR_IIP:
			ptr = pt_reg_addr(pt, cr_iip);
			break;
		case PT_PR:
			ptr = pt_reg_addr(pt, pr);
			break;
			/* scratch register */

		default:
			/* disallow accessing anything else...
			 */
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
	} else if (addr <= PT_AR_SSD) {
		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon.  This
		 * test must be done once we know that we can do the
		 * operation, i.e. the arguments are all valid, but
		 * before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes
		 * are trying to modify the debug registers for system
		 * wide monitoring sessions.
		 *
		 * We also include read access here, because they may
		 * cause the PMU-installed debug register state
		 * (dbr[], ibr[]) to be reset.  The two arrays are also
		 * used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID.  The registers are restored
		 * by the PMU context switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0,
			       sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0,
			       sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if ((regnum & 1) && write_access) {
			/* don't let the user set kernel-level breakpoints: */
			*ptr = *data & ~(7UL << 56);
			return 0;
		}
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0)
		return -EIO;

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs (12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}
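/*
 * Tracer-side usage sketch for ptrace_getregs()/ptrace_setregs()
 * (illustrative only: this is hypothetical user-level code, with a
 * made-up pid and no real error handling, and it is compiled out
 * here so it has no effect on the kernel build):
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>		/* struct pt_all_user_regs */

static int show_and_restore(pid_t pid)
{
	struct pt_all_user_regs pr;

	/* routed to ptrace_getregs() above via arch_ptrace() */
	if (ptrace(PTRACE_GETREGS, pid, 0, &pr) < 0)
		return -1;
	printf("ip=0x%lx cfm=0x%lx\n", pr.cr_iip, pr.cfm);
	/* write the (possibly modified) set back via ptrace_setregs() */
	return ptrace(PTRACE_SETREGS, pid, 0, &pr) < 0 ? -1 : 0;
}
#endif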
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0)
		return -EIO;

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs (12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request, long addr, long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (access_process_vm(child, addr, &data, sizeof(data), 0)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}


static void
syscall_trace (void)
{
	/*
	 * The 0x80 provides a way for the tracing parent to
	 * distinguish between a syscall stop and SIGTRAP delivery.
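	 *
	 * E.g., a tracer that enabled PTRACE_O_TRACESYSGOOD sees
	 * WSTOPSIG(status) == (SIGTRAP | 0x80) for a syscall stop,
	 * which cannot be confused with an ordinary SIGTRAP.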
	 */
	ptrace_notify(SIGTRAP
		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it
	 * will do for normal use.  strace only continues with a
	 * signal if the stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	if (unlikely(current->audit_context)) {
		long syscall;
		int arch;

		if (IS_IA32_PROCESS(&regs)) {
			syscall = regs.r1;
			arch = AUDIT_ARCH_I386;
		} else {
			syscall = regs.r15;
			arch = AUDIT_ARCH_IA64;
		}

		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (unlikely(current->audit_context)) {
		int success = AUDITSC_RESULT(regs.r10);
		long result = regs.r8;

		if (success != AUDITSC_SUCCESS)
			result = -result;
		audit_syscall_exit(success, result);
	}

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}