/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include <trace/syscall.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * This code does not yet catch signals sent when the child dies;
 * that is handled in exit.c and in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
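
/*
 * Worked example for the check above (illustrative selector values,
 * not taken from this file): a typical user selector such as 0x2b has
 * (0x2b & SEGMENT_RPL_MASK) == 3 == USER_RPL and is accepted, as is
 * the null selector 0; a kernel selector such as 0x10 has RPL 0 and
 * is rejected.
 */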

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* Fall through to install the value. */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

/*
 * A watchpoint covers at most 4 bytes on 32-bit, so the highest
 * permitted start address must keep addr .. addr+3 inside the user
 * address space, hence the -3.
 */
static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else	/* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

/*
 * On 64-bit, a watchpoint can cover up to 8 bytes, hence the -7.
 * An IA32 task is additionally confined to the 32-bit address space,
 * where the largest watchpoint covers 4 bytes.
 */
static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE_MAX - 7;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
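
/*
 * Sketch (hypothetical debugger-side usage, not part of this file):
 * the offsets that getreg()/putreg() receive are the byte offsets a
 * tracer passes as the addr argument of PTRACE_PEEKUSER/POKEUSER,
 * e.g.:
 *
 *	errno = 0;
 *	ip = ptrace(PTRACE_PEEKUSER, pid,
 *		    offsetof(struct user_regs_struct, ip), 0);
 *
 * (field naming as in the kernel's struct user_regs_struct; glibc's
 * sys/user.h spells some fields differently, e.g. rip on 64-bit)
 */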

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data. Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf. It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi. Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 0x10 (undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 0x10 (break on I/O reads or writes), so
		 *   mask |= 0x4444.
		 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
		 *   0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 0x10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554.
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
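
/*
 * Worked example for the DR7 check above, using the encoding from the
 * comment: a 4-byte write watchpoint has R/Wi == 01 and LENi == 11,
 * i.e. the half-byte 0xd, and (DR7_MASK >> 0xd) & 1 == 0 with either
 * mask value, so it is accepted.  An I/O breakpoint has R/Wi == 10,
 * half-byte 0x2, and (DR7_MASK >> 0x2) & 1 == 1, so it is rejected
 * with -EIO.
 */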

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	/*
	 * Number of regset slots needed to cover the bytes actually
	 * in use in the task's I/O bitmap.
	 */
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(child->bts, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;
		int error;

		error = trace->read(child->bts, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(child->bts);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
{
	child->bts_buffer = alloc_locked_buffer(size);
	if (!child->bts_buffer)
		return -ENOMEM;

	child->bts_size = size;

	return 0;
}

static void ptrace_bts_free_buffer(struct task_struct *child)
{
	free_locked_buffer(child->bts_buffer, child->bts_size);
	child->bts_buffer = NULL;
	child->bts_size = 0;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	if (child->bts) {
		ds_release_bts(child->bts);
		child->bts = NULL;
	}

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		child->thread.bts_ovfl_signal = cfg.signal;
		return -EOPNOTSUPP;
	}

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
	    (cfg.size != child->bts_size)) {
		int error;

		ptrace_bts_free_buffer(child);

		error = ptrace_bts_allocate_buffer(child, cfg.size);
		if (error < 0)
			return error;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	child->bts = ds_request_bts(child, child->bts_buffer, child->bts_size,
				    /* ovfl = */ NULL, /* th = */ (size_t)-1,
				    flags);
	if (IS_ERR(child->bts)) {
		int error = PTR_ERR(child->bts);

		ptrace_bts_free_buffer(child);
		child->bts = NULL;

		return error;
	}

	return sizeof(cfg);
}
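
/*
 * Sketch (hypothetical tracer-side usage, not part of this file): a
 * debugger would enable branch tracing roughly like this, with addr
 * carrying the config pointer and data its size (see the
 * PTRACE_BTS_CONFIG case in arch_ptrace() below):
 *
 *	struct ptrace_bts_config cfg = {
 *		.size  = 4096,
 *		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 */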

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= child->thread.bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(child->bts);
}

static int ptrace_bts_size(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

static void ptrace_bts_fork(struct task_struct *tsk)
{
	tsk->bts			= NULL;
	tsk->bts_buffer			= NULL;
	tsk->bts_size			= 0;
	tsk->thread.bts_ovfl_signal	= 0;
}

static void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		ds_release_bts(child->bts);
		child->bts = NULL;

		/*
		 * We cannot update total_vm and locked_vm since the
		 * child's mm is already gone.  But we can reclaim the
		 * memory.
		 */
		kfree(child->bts_buffer);
		child->bts_buffer = NULL;
		child->bts_size = 0;
	}
}

static void ptrace_bts_detach(struct task_struct *child)
{
	/*
	 * Ptrace_detach() races with ptrace_untrace() in case
	 * the child dies and is reaped by another thread.
	 *
	 * We only do the memory accounting at this point and
	 * leave the buffer deallocation and the bts tracer
	 * release to ptrace_bts_untrace() which will be called
	 * later on with tasklist_lock held.
	 */
	release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
static inline void ptrace_bts_detach(struct task_struct *child) {}
static inline void ptrace_bts_untrace(struct task_struct *child) {}
#endif /* CONFIG_X86_PTRACE_BTS */

void x86_ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
	ptrace_bts_fork(child);
}

void x86_ptrace_untrace(struct task_struct *child)
{
	ptrace_bts_untrace(child);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
	ptrace_bts_detach(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *)data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *)data, 0);
		break;
#endif
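
	/*
	 * Sketch (hypothetical usage, not part of this file): for the
	 * two thread-area requests above, addr selects the GDT TLS
	 * slot and data points at a struct user_desc, so a tracer
	 * would do something like (tls_entry is an illustrative slot
	 * index):
	 *
	 *	struct user_desc desc = { .entry_number = tls_entry, ... };
	 *	ptrace(PTRACE_SET_THREAD_AREA, pid, tls_entry, &desc);
	 */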

#ifdef CONFIG_X86_64
	/*
	 * Normal 64-bit interface to access TLS data.
	 * Works just like arch_prctl, except that the arguments
	 * are reversed.
	 */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *)addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *)addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
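
/*
 * For reference, R32(eip, ip) in the switch below expands to:
 *
 *	case offsetof(struct user32, regs.eip):
 *		regs->ip = value; break;
 *
 * i.e. each macro maps one 32-bit user32 slot onto the corresponding
 * native pt_regs field or segment register.
 */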

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * Pick the native view, except that an IA32 task under a 64-bit
 * kernel gets the 32-bit view.  Note how the #ifdefs combine: with
 * CONFIG_IA32_EMULATION the 32-bit return is guarded by the TIF_IA32
 * test, on a 32-bit kernel it is unconditional, and without IA32
 * emulation only the x86_64 view exists.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}


#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_enter(regs);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}
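
/*
 * Sketch (hypothetical tracer side, not part of this file): the usual
 * counterpart of syscall_trace_enter()/syscall_trace_leave() is a
 * PTRACE_SYSCALL loop in the debugger, along the lines of:
 *
 *	ptrace(PTRACE_ATTACH, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);	(stop at entry)
 *		waitpid(pid, &status, 0);
 *		nr = ptrace(PTRACE_PEEKUSER, pid,
 *			    offsetof(struct user_regs_struct, orig_ax), 0);
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);	(stop at exit)
 *		waitpid(pid, &status, 0);
 *	}
 */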