/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include <trace/syscall.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * Does not yet catch signals sent when the child dies;
 * that needs to happen in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
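/*
 * For illustration: a null selector (0) is always accepted, and any
 * non-null selector must carry RPL == USER_RPL (3).  So 0x2b
 * (0x28 | 3, a typical user data selector) passes, while 0x10
 * (RPL 0, a kernel selector) is rejected:
 *
 *	invalid_selector(0x00);	-- false: null selector is fine
 *	invalid_selector(0x2b);	-- false: 0x2b & 3 == USER_RPL
 *	invalid_selector(0x10);	-- true:  0x10 & 3 == 0 != USER_RPL
 */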
#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
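		/*
		 * Note: %cs and %ss deliberately fall through into the
		 * default case once the null-selector check has
		 * passed, so the validated value is stored like any
		 * other general register.
		 */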
	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we set it earlier.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
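/*
 * For illustration: FLAG_MASK confines a tracer to the user-visible
 * flags.  A debugger writing ~0UL through set_flags() cannot raise
 * IOPL, IF, VM or any other privileged bit; only the arithmetic and
 * control flags listed in FLAG_MASK_32 (plus NT on 64-bit) change:
 *
 *	set_flags(task, ~0UL);
 *	-- regs->flags gains only CF, PF, AF, ZF, SF, TF, DF, OF,
 *	   RF and AC; everything outside FLAG_MASK is preserved.
 */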
	static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:	return child->thread.debugreg0;
	case 1:	return child->thread.debugreg1;
	case 2:	return child->thread.debugreg2;
	case 3:	return child->thread.debugreg3;
	case 6:	return child->thread.debugreg6;
	case 7:	return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:	child->thread.debugreg0 = data; break;
	case 1:	child->thread.debugreg1 = data; break;
	case 2:	child->thread.debugreg2 = data; break;
	case 3:	child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is
		 * 0x4444 | 0x1110 == 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System
		 * Programming).
		 */
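		/*
		 * Worked example (hypothetical values): to set a
		 * 4-byte write watchpoint in slot 0, a debugger
		 * writes DR7 = 0x000d0001 (L0 = 1, R/W0 = binary 01
		 * (write), LEN0 = binary 11 (4 bytes)).  The check
		 * nibble for i = 0 is (0x000d0001 >> 16) & 0xf = 0xd,
		 * and (0x5f54 >> 0xd) & 1 == 0, so the value is
		 * accepted.  By contrast, DR7 = 0x00020001 (R/W0 =
		 * binary 10, an I/O breakpoint) gives nibble 0x2,
		 * (0x5f54 >> 0x2) & 1 == 1, and we return -EIO.
		 */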
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct reference for the duration of
 * ptrace operations, but since destruction is deferred, it may be
 * executed after both tracer and tracee have exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

 out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}
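/*
 * Read one BTS record, counted backwards from the most recently
 * written one: index 0 is the newest record, index 1 the one before
 * it, and so on.  The DS area is a circular buffer, so the pointer
 * arithmetic below wraps around to the end of the buffer when it
 * underruns trace->ds.begin.
 */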
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}
static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		/*
		 * Buffer overflow notification is not yet supported;
		 * bail out before installing the signal.
		 */
		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}
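/*
 * Illustrative sketch (userspace, not part of this file): a tracer
 * that wants the last branches of a stopped tracee would allocate a
 * trace buffer and then fetch records, roughly:
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *	...
 *	struct bts_struct bts;
 *	ptrace(PTRACE_BTS_GET, pid, &bts, 0);	-- index 0: newest record
 *
 * The buffer size and the omitted error handling are assumptions of
 * the sketch; ptrace_bts_status() below reports the settings actually
 * in effect.
 */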
static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;
	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/* normal 64bit interface to access TLS data.
	   Works just like arch_prctl, except that the arguments
	   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
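/*
 * Illustrative sketch (userspace, not part of this file): reading a
 * register through the USER-area interface handled above.  The
 * offsets come from struct user_regs_struct in <sys/user.h>; on
 * x86_64, for example, a debugger could fetch a stopped tracee's
 * instruction pointer with:
 *
 *	errno = 0;
 *	long rip = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user_regs_struct, rip), 0);
 *
 * arch_ptrace() routes this through getreg(); unaligned or
 * out-of-range offsets fail with -EIO, as checked in the
 * PTRACE_PEEKUSR case above.
 */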
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
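/*
 * For illustration, R32(ebx, bx) expands to:
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 *
 * i.e. it maps a 32-bit user32 register offset onto the 64-bit
 * pt_regs field for the same register.  SEG32() does the same for
 * segment registers, delegating to set_segment_reg().
 */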
static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32
static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
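/*
 * Note on the selection above: the view is chosen by the *tracee's*
 * ABI, not the tracer's.  A 64-bit debugger examining a TIF_IA32
 * task is handed user_x86_32_view, so PTRACE_GETREGS and core dumps
 * see the i386 register layout for that task.
 */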
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}


#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_enter(regs);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}