// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNAT bits that are stored on the
 * kernel backing store.  Since, in general, the alignments of the
 * user and kernel backing stores differ, this is not completely
 * trivial.  In essence, we need to construct the user RNAT based on
 * up to two kernel RNAT values and/or the RNAT value saved in the
 * child's pt_regs.
 *
 *	user rbs
 *
 *	+--------+ <-- lowest address
 *	| slot62 |
 *	+--------+
 *	|  rnat  | 0x....1f8
 *	+--------+
 *	| slot00 | \
 *	+--------+ |
 *	| slot01 | > child_regs->ar_rnat
 *	+--------+ |
 *	| slot02 | /				kernel rbs
 *	+--------+			+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 *	+- - - - +			+--------+
 *					| slot62 |
 *	+- - - - +			+--------+
 *					|  rnat  |
 *	+- - - - +			+--------+
 *	  vrnat				| slot00 |
 *	+- - - - +			+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
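 * The user-visible value is then assembled as
 * ((rnat0 & m) >> shift) | ((rnat1 & m) << (63 - shift)).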
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits
 *   are merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 get their values from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
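 * USER_RBS_START is the user-level address at which the copy begins.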
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (by writing its memory directly), and we must
 * not let the RSE state stored in the kernel override the user stack
 * (the user-space RSE state is newer than the kernel's in that case).
 * To work around the issue, we copy the kernel RSE state to the user
 * RBS before the task is stopped, so the user RBS holds up-to-date
 * data.  We then copy the user RSE state back to the kernel after the
 * task is resumed from the traced stop, and the kernel uses the newer
 * state to return to user mode.  TIF_RESTORE_RSE is the flag that
 * indicates we need to synchronize the user RSE state back to the
 * kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |= (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		__put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);
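
	/*
	 * __put_user()/__copy_to_user() return non-zero on fault, so any
	 * failure above collapses into a single -EIO:
	 */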
	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
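	/*
	 * ia64_sync_fph() has just written f32-f127 into thread.fph and
	 * disabled fph access, so it is safe to overwrite the saved copy:
	 */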
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}

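/*
 * syscall_trace_enter() and syscall_trace_leave() below are called
 * from the syscall path in entry.S rather than from C code.
 */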
/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();


	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static const ptrdiff_t pt_offsets[32] =
{
#define R(n) offsetof(struct pt_regs, r##n)
	[0] = -1, R(1), R(2), R(3),
	[4] = -1, [5] = -1, [6] = -1, [7] = -1,
	R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
	R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
	R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
#undef R
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt = task_pt_regs(target);
	unsigned reg = addr / sizeof(unsigned long);
	ptrdiff_t d = pt_offsets[reg];

	if (d >= 0) {
		unsigned long *ptr = (void *)pt + d;
		if (write_access)
			*ptr = *data;
		else
			*data = *ptr;
		return 0;
	} else {
		char nat = 0;
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;
			int ret = unw_get_gr(info, reg, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, reg, data, &nat, write_access);
	}
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
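	/*
	 * b1-b5 are preserved (callee-saved) branch registers, so they
	 * live in the unwind state rather than in pt_regs:
	 */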
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

struct regset_membuf {
	struct membuf to;
	int ret;
};

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	unsigned int n;
	elf_greg_t reg;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */


	/* Skip r0 */
	membuf_zero(&to, 8);
	for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) {
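		/* fetch each remaining 8-byte slot of the layout above: */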
		if (access_elf_reg(info->task, info, n, &reg, 0) < 0) {
			dst->ret = -EIO;
			return;
		}
		membuf_store(&to, reg);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;

	if (unw_unwind_to_user(info) < 0)
		return;

	if (!dst->count)
		return;
	/* Skip r0 */
	if (dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret)
			return;
	}

	while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
		unsigned int n, from, to;
		elf_greg_t tmp[16];

		from = dst->pos;
		to = from + sizeof(tmp);
		if (to > ELF_AR_END_OFFSET)
			to = ELF_AR_END_OFFSET;
		/* get up to 16 values */
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				from, to);
		if (dst->ret)
			return;
		/* now copy them into registers */
		for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
			if (access_elf_reg(dst->target, info, from,
					   &tmp[n], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct task_struct *task = info->task;
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	elf_fpreg_t reg;
	unsigned int n;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	membuf_zero(&to, 2 * sizeof(elf_fpreg_t));

	/* fr2-fr31 */
	for (n = 2; to.left && n < 32; n++) {
		if (unw_get_fr(info, n, &reg)) {
			dst->ret = -EIO;
			return;
		}
		membuf_write(&to, &reg, sizeof(reg));
	}

	/* fph */
	if (!to.left)
		return;

	ia64_flush_fph(task);
	if (task->thread.flags & IA64_THREAD_FPH_VALID)
		membuf_write(&to, &task->thread.fph, 96 * sizeof(reg));
	else
		membuf_zero(&to, 96 * sizeof(reg));
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			  dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) {	/* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	/* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

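		/* start and end are now 16-byte aligned; write whole registers: */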
		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static void
unwind_and_call(void (*call)(struct unw_frame_info *, void *),
		struct task_struct *target, void *data)
{
	if (target == current)
		unw_init_running(call, data);
	else {
		struct unw_frame_info info;
		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, target);
		(*call)(&info, data);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };
	unwind_and_call(call, target, &info);
	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_gpregs_get, target, &info);
	return info.ret;
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
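	/*
	 * fr32-fr127 (the fph partition) are only reported once the
	 * thread has used them, i.e., once IA64_THREAD_FPH_VALID is set:
	 */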
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_fpregs_get, target, &info);
	return info.ret;
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
	    (addr >= PT_R7 + 8 && addr < PT_B1) ||
	    (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
	    (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		unsigned reg = pos / sizeof(elf_fpreg_t);
		int which_half = (pos / sizeof(unsigned long)) & 1;

		if (reg < 32) {	/* fr2-fr31 */
			struct unw_frame_info info;
			elf_fpreg_t fpreg;

			memset(&info, 0, sizeof(info));
			unw_init_from_blocked_task(&info, child);
			if (unw_unwind_to_user(&info) < 0)
				return 0;

			if (unw_get_fr(&info, reg, &fpreg))
				return -1;
			if (write_access) {
				fpreg.u.bits[which_half] = *data;
				if (unw_set_fr(&info, reg, fpreg))
					return -1;
			} else {
				*data = fpreg.u.bits[which_half];
			}
		} else {	/* fph */
			elf_fpreg_t *p = &child->thread.fph[reg - 32];
			unsigned long *bits = &p->u.bits[which_half];

			ia64_sync_fph(child);
			if (write_access)
				*bits = *data;
			else if (child->thread.flags & IA64_THREAD_FPH_VALID)
				*data = *bits;
			else
				*data = 0;
		}
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		struct unw_frame_info info;

		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return 0;

		return access_elf_reg(child, &info, pos, data, write_access);
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon.  This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because a read may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset.  The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID.  The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.regset_get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = 0,
		.n = 6,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}