/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_TLS,
};

/*
 * Does not yet catch signals sent when the child dies;
 * those are handled in exit.c and in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	regno >>= 2;
	if (regno > FS)
		--regno;
	return &regs->bx + regno;
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		retval = task->thread.gs;
		if (task == current)
			savesegment(gs, retval);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	if (offset != offsetof(struct user_regs_struct, gs))
		*pt_regs_access(task_pt_regs(task), offset) = value;
	else {
		task->thread.gs = value;
		if (task == current)
			/*
			 * The user-mode %gs is not affected by
			 * kernel entry, so we must update the CPU.
			 */
			loadsegment(gs, value);
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}
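
/*
 * Editor's illustration (not part of the original file): the offset
 * handed to pt_regs_access() is a byte offset into struct
 * user_regs_struct, so a debugger reads a stopped tracee's instruction
 * pointer with something like:
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid,
 *			 offsetof(struct user_regs_struct, ip), 0);
 *
 * This works because the general registers in user_regs_struct and
 * pt_regs share one layout, which the BUILD_BUG_ON above pins down.
 */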
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

	/*
	 * Can't actually change these in 64-bit mode.
	 */
	case offsetof(struct user_regs_struct, cs):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif	/* CONFIG_X86_32 */
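
/*
 * Editor's note on the eflags accessors below (illustration, no new
 * behavior): TIF_FORCED_TF records that the *debugger*, not the tracee,
 * set X86_EFLAGS_TF, e.g. via PTRACE_SINGLESTEP.  With the flag set:
 *
 *	get_flags(task)		reports eflags with TF masked out;
 *	set_flags(task, v)	re-adds TF unless v itself carries TF,
 *				in which case the user now owns the bit.
 *
 * A single-stepping debugger therefore never leaks TF into the
 * tracee's view of its own flags.
 */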
static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}
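
/*
 * Usage sketch (editor's illustration): putreg()/getreg() back the
 * word-sized USER-area accesses, so a tracer toggling the direction
 * flag would go through set_flags() via something like:
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, flags),
 *	       old_flags | X86_EFLAGS_DF);
 *
 * Bits outside FLAG_MASK are silently preserved from the old value.
 */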
static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}
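
/*
 * Worked example for the DR7 check in ptrace_set_debugreg() below
 * (editor's illustration): to watch 4 bytes at an address for writes, a
 * debugger programs slot 0 with R/W0 = 01b (data write) and LEN0 = 11b
 * (4 bytes), giving the half-byte check = 1101b = 0xd.
 * (DR7_MASK >> 0xd) & 1 == 0 on both masks, so the value is accepted;
 * an I/O breakpoint (R/W = 10b, check = xx10b) lands on a set mask bit
 * and is rejected with -EIO.
 */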
static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming)
		 */
#ifdef CONFIG_X86_32
#define	DR7_MASK	0x5f54
#else
#define	DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}

static int ptrace_bts_get_size(struct task_struct *child)
{
	if (!child->thread.ds_area_msr)
		return -ENXIO;

	return ds_get_bts_index((void *)child->thread.ds_area_msr);
}

static int ptrace_bts_read_record(struct task_struct *child,
				  long index,
				  struct bts_struct __user *out)
{
	struct bts_struct ret;
	int retval;
	int bts_end;
	int bts_index;

	if (!child->thread.ds_area_msr)
		return -ENXIO;

	if (index < 0)
		return -EINVAL;

	bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
	if (bts_end <= index)
		return -EINVAL;

	/* translate the ptrace bts index into the ds bts index */
	bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
	bts_index -= (index + 1);
	if (bts_index < 0)
		bts_index += bts_end;

	retval = ds_read_bts((void *)child->thread.ds_area_msr,
			     bts_index, &ret);
	if (retval < 0)
		return retval;

	if (copy_to_user(out, &ret, sizeof(ret)))
		return -EFAULT;

	return sizeof(ret);
}
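
/*
 * Editor's illustration of the index translation above: with a BTS
 * buffer of bts_end == 8 records and a hardware write index
 * ds_get_bts_index() == 3, ptrace index 0 (the most recent record)
 * maps to ds index 2, and ptrace index 3 wraps around to ds index 7:
 *
 *	bts_index = 3 - (0 + 1) = 2
 *	bts_index = 3 - (3 + 1) = -1  ->  -1 + 8 = 7
 */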
static int ptrace_bts_write_record(struct task_struct *child,
				   const struct bts_struct *in)
{
	int retval;

	if (!child->thread.ds_area_msr)
		return -ENXIO;

	retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
	if (retval)
		return retval;

	return sizeof(*in);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	if (!child->thread.ds_area_msr)
		return -ENXIO;

	return ds_clear((void *)child->thread.ds_area_msr);
}

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	int end, i;
	void *ds = (void *)child->thread.ds_area_msr;

	if (!ds)
		return -ENXIO;

	end = ds_get_bts_index(ds);
	if (end <= 0)
		return end;

	if (size < (end * sizeof(struct bts_struct)))
		return -EIO;

	for (i = 0; i < end; i++, out++) {
		struct bts_struct ret;
		int retval;

		retval = ds_read_bts(ds, i, &ret);
		if (retval < 0)
			return retval;

		if (copy_to_user(out, &ret, sizeof(ret)))
			return -EFAULT;
	}

	ds_clear(ds);

	return end;
}

static int ptrace_bts_realloc(struct task_struct *child,
			      int size, int reduce_size)
{
	unsigned long rlim, vm;
	int ret, old_size;

	if (size < 0)
		return -EINVAL;

	old_size = ds_get_bts_size((void *)child->thread.ds_area_msr);
	if (old_size < 0)
		return old_size;

	ret = ds_free((void **)&child->thread.ds_area_msr);
	if (ret < 0)
		goto out;

	size >>= PAGE_SHIFT;
	old_size >>= PAGE_SHIFT;

	current->mm->total_vm -= old_size;
	current->mm->locked_vm -= old_size;

	if (size == 0)
		goto out;

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm = current->mm->total_vm + size;
	if (rlim < vm) {
		ret = -ENOMEM;

		if (!reduce_size)
			goto out;

		size = rlim - current->mm->total_vm;
		if (size <= 0)
			goto out;
	}

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm = current->mm->locked_vm + size;
	if (rlim < vm) {
		ret = -ENOMEM;

		if (!reduce_size)
			goto out;

		size = rlim - current->mm->locked_vm;
		if (size <= 0)
			goto out;
	}

	ret = ds_allocate((void **)&child->thread.ds_area_msr,
			  size << PAGE_SHIFT);
	if (ret < 0)
		goto out;

	current->mm->total_vm += size;
	current->mm->locked_vm += size;

 out:
	if (child->thread.ds_area_msr)
		set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
	else
		clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);

	return ret;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	int bts_size, ret = 0;
	void *ds;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	if ((int)cfg.size < 0)
		return -EINVAL;

	bts_size = 0;
	ds = (void *)child->thread.ds_area_msr;
	if (ds) {
		bts_size = ds_get_bts_size(ds);
		if (bts_size < 0)
			return bts_size;
	}
	cfg.size = PAGE_ALIGN(cfg.size);

	if (bts_size != cfg.size) {
		ret = ptrace_bts_realloc(child, cfg.size,
					 cfg.flags & PTRACE_BTS_O_CUT_SIZE);
		if (ret < 0)
			goto errout;

		ds = (void *)child->thread.ds_area_msr;
	}

	if (cfg.flags & PTRACE_BTS_O_SIGNAL)
		ret = ds_set_overflow(ds, DS_O_SIGNAL);
	else
		ret = ds_set_overflow(ds, DS_O_WRAP);
	if (ret < 0)
		goto errout;

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		child->thread.debugctlmsr |= ds_debugctl_mask();
	else
		child->thread.debugctlmsr &= ~ds_debugctl_mask();

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	else
		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);

	ret = sizeof(cfg);

 out:
	if (child->thread.debugctlmsr)
		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
	else
		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);

	return ret;

 errout:
	child->thread.debugctlmsr &= ~ds_debugctl_mask();
	clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	goto out;
}

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	void *ds = (void *)child->thread.ds_area_msr;
	struct ptrace_bts_config cfg;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	memset(&cfg, 0, sizeof(cfg));

	if (ds) {
		cfg.size = ds_get_bts_size(ds);

		if (ds_get_overflow(ds) == DS_O_SIGNAL)
			cfg.flags |= PTRACE_BTS_O_SIGNAL;

		if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
		    child->thread.debugctlmsr & ds_debugctl_mask())
			cfg.flags |= PTRACE_BTS_O_TRACE;

		if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
			cfg.flags |= PTRACE_BTS_O_SCHED;
	}

	cfg.bts_size = sizeof(struct bts_struct);

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}
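
/*
 * Rough userspace sketch (editor's illustration; error handling
 * omitted, and the header providing struct ptrace_bts_config is
 * assumed to be in scope):
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,		// rounded up to whole pages
 *		.flags	= PTRACE_BTS_O_TRACE | PTRACE_BTS_O_CUT_SIZE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *	...
 *	ptrace(PTRACE_BTS_DRAIN, pid, buf, n * sizeof(struct bts_struct));
 *
 * Note the argument order: addr carries the user pointer and data the
 * size, mirroring the dispatch in arch_ptrace() below.
 */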
void ptrace_bts_take_timestamp(struct task_struct *tsk,
			       enum bts_qualifier qualifier)
{
	struct bts_struct rec = {
		.qualifier = qualifier,
		.variant.jiffies = jiffies_64
	};

	ptrace_bts_write_record(tsk, &rec);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
	if (child->thread.ds_area_msr) {
		ptrace_bts_realloc(child, 0, 0);
		child->thread.debugctlmsr &= ~ds_debugctl_mask();
		if (!child->thread.debugctlmsr)
			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
		clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
	}
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap);
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_get_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
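
/*
 * Editor's note on PTRACE_ARCH_PRCTL above: the argument order is the
 * reverse of arch_prctl(2).  Setting a tracee's %fs base, for example
 * (illustrative only):
 *
 *	arch_prctl(ARCH_SET_FS, base);			   // on yourself
 *	ptrace(PTRACE_ARCH_PRCTL, pid, base, ARCH_SET_FS); // on a tracee
 */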
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
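
/*
 * For reference (editor's illustration), R32(ebx, bx) in the setter
 * above expands to:
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 *
 * mapping each 32-bit USER-area slot onto the 64-bit pt_regs field
 * that holds it.  The macros are redefined below for the getter.
 */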
#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
{
	siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
	compat_siginfo_t __user *si32 = compat_ptr(data);
	siginfo_t ssi;
	int ret;

	if (request == PTRACE_SETSIGINFO) {
		memset(&ssi, 0, sizeof(siginfo_t));
		ret = copy_siginfo_from_user32(&ssi, si32);
		if (ret)
			return ret;
		if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
			return -EFAULT;
	}
	ret = sys_ptrace(request, pid, addr, (unsigned long)si);
	if (ret)
		return ret;
	if (request == PTRACE_GETSIGINFO) {
		if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
			return -EFAULT;
		ret = copy_siginfo_to_user32(si32, &ssi);
	}
	return ret;
}
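
/*
 * Editor's note: ptrace32_siginfo() bounces the siginfo through a
 * native-format buffer in the tracer's own user address space,
 * roughly:
 *
 *	compat_siginfo_t (user, 32-bit)  <->  siginfo_t (user, native)
 *	                                      then passed to sys_ptrace()
 *
 * compat_alloc_user_space() carves that temporary buffer out of the
 * tracer's user stack.
 */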
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
	struct task_struct *child;
	struct pt_regs *childregs;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_TRACEME:
	case PTRACE_ATTACH:
	case PTRACE_KILL:
	case PTRACE_CONT:
	case PTRACE_SINGLESTEP:
	case PTRACE_SINGLEBLOCK:
	case PTRACE_DETACH:
	case PTRACE_SYSCALL:
	case PTRACE_OLDSETOPTIONS:
	case PTRACE_SETOPTIONS:
	case PTRACE_SET_THREAD_AREA:
	case PTRACE_GET_THREAD_AREA:
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
		return sys_ptrace(request, pid, addr, data);

	default:
		return -EINVAL;

	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
	case PTRACE_POKEDATA:
	case PTRACE_POKETEXT:
	case PTRACE_POKEUSR:
	case PTRACE_PEEKUSR:
	case PTRACE_GETREGS:
	case PTRACE_SETREGS:
	case PTRACE_SETFPREGS:
	case PTRACE_GETFPREGS:
	case PTRACE_SETFPXREGS:
	case PTRACE_GETFPXREGS:
	case PTRACE_GETEVENTMSG:
		break;

	case PTRACE_SETSIGINFO:
	case PTRACE_GETSIGINFO:
		return ptrace32_siginfo(request, pid, addr, data);
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out;

	childregs = task_pt_regs(child);

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	/*
	 * The regset cases below must fall out through "out" so the
	 * reference taken by ptrace_get_task_struct() is dropped.
	 */
	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		ret = copy_regset_to_user(child, &user_x86_32_view,
					  REGSET_GENERAL,
					  0, sizeof(struct user_regs_struct32),
					  datap);
		break;

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		ret = copy_regset_from_user(child, &user_x86_32_view,
					    REGSET_GENERAL, 0,
					    sizeof(struct user_regs_struct32),
					    datap);
		break;

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		ret = copy_regset_to_user(child, &user_x86_32_view,
					  REGSET_FP, 0,
					  sizeof(struct user_i387_ia32_struct),
					  datap);
		break;

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		ret = copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);
		break;

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		ret = copy_regset_to_user(child, &user_x86_32_view,
					  REGSET_XFP, 0,
					  sizeof(struct user32_fxsr_struct),
					  datap);
		break;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		ret = copy_regset_from_user(child, &user_x86_32_view,
					    REGSET_XFP, 0,
					    sizeof(struct user32_fxsr_struct),
					    datap);
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

 out:
	put_task_struct(child);
	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */
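
/*
 * Editor's sketch of the user_regset plumbing used below: each regset
 * describes one block of thread state as .n elements of .size bytes,
 * and the PTRACE_GETREGS paths above are just full-window copies:
 *
 *	copy_regset_to_user(child, view, REGSET_GENERAL,
 *			    0, n * size, datap);
 *
 * The same tables name the ELF core-dump notes (NT_PRSTATUS etc.)
 * carrying this thread state.
 */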
#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
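
/*
 * Editor's note: task_user_regset_view() keys off TIF_IA32 of the task
 * it is handed.  arch_ptrace() above passes current (the *tracer*), so
 * a 32-bit debugger on a 64-bit kernel is served the i386 layout it
 * expects:
 *
 *	TIF_IA32 set    ->  &user_x86_32_view   (EM_386)
 *	TIF_IA32 clear  ->  &user_x86_64_view   (EM_X86_64)
 *
 * On a 32-bit kernel only the i386 view exists.
 */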
#ifdef CONFIG_X86_32

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
	/*
	 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for
	 * syscall interception.
	 */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
	int ret = 0;

	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (entryexit)
			audit_syscall_exit(AUDITSC_RESULT(regs->ax),
					   regs->ax);
		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
		 * not used, entry.S will call us only on syscall exit, not
		 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
		 * calling send_sigtrap() on syscall entry.
		 *
		 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
		 * is_singlestep is false, despite its name, so we will still do
		 * the correct thing.
		 */
		else if (is_singlestep)
			goto out;
	}

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag!*/
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	ret = is_sysemu;
 out:
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
				    regs->bx, regs->cx, regs->dx, regs->si);
	if (ret == 0)
		return 0;

	regs->orig_ax = -1;		/* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
	return 1;
}
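
/*
 * Editor's note on the return value above: do_syscall_trace() returns
 * nonzero only in the PTRACE_SYSEMU case, and the assembly caller is
 * expected to skip the actual system call.  Setting
 *
 *	regs->orig_ax = -1;
 *
 * additionally keeps the signal code from restarting a syscall that
 * never ran.
 */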
-brl 1503 */ 1504 if (current->exit_code) { 1505 send_sig(current->exit_code, current, 1); 1506 current->exit_code = 0; 1507 } 1508 } 1509 1510 asmlinkage void syscall_trace_enter(struct pt_regs *regs) 1511 { 1512 /* do the secure computing check first */ 1513 secure_computing(regs->orig_ax); 1514 1515 if (test_thread_flag(TIF_SYSCALL_TRACE) 1516 && (current->ptrace & PT_PTRACED)) 1517 syscall_trace(regs); 1518 1519 if (unlikely(current->audit_context)) { 1520 if (test_thread_flag(TIF_IA32)) { 1521 audit_syscall_entry(AUDIT_ARCH_I386, 1522 regs->orig_ax, 1523 regs->bx, regs->cx, 1524 regs->dx, regs->si); 1525 } else { 1526 audit_syscall_entry(AUDIT_ARCH_X86_64, 1527 regs->orig_ax, 1528 regs->di, regs->si, 1529 regs->dx, regs->r10); 1530 } 1531 } 1532 } 1533 1534 asmlinkage void syscall_trace_leave(struct pt_regs *regs) 1535 { 1536 if (unlikely(current->audit_context)) 1537 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1538 1539 if ((test_thread_flag(TIF_SYSCALL_TRACE) 1540 || test_thread_flag(TIF_SINGLESTEP)) 1541 && (current->ptrace & PT_PTRACED)) 1542 syscall_trace(regs); 1543 } 1544 1545 #endif /* CONFIG_X86_32 */ 1546