/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
        BUG_ON(!list_empty(&child->ptrace_entry));
        list_add(&child->ptrace_entry, &new_parent->ptraced);
        child->parent = new_parent;
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(struct task_struct *child)
{
        spin_lock(&child->sighand->siglock);
        if (task_is_traced(child)) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        __set_task_state(child, TASK_STOPPED);
                } else {
                        signal_wake_up(child, 1);
                }
        }
        spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(struct task_struct *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);

        if (task_is_traced(child))
                ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks. After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
                ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
                if (task_is_stopped(child))
                        child->state = TASK_TRACED;
                else if (!task_is_traced(child) && !kill)
                        ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                wait_task_inactive(child);

        /* All systems go.. */
        return ret;
}

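/*
 * Userspace view of the check above (an illustrative sketch, not part
 * of the original file): every request except PTRACE_ATTACH and
 * PTRACE_KILL fails with -ESRCH unless the child is already in a
 * ptrace stop, so a tracer must wait for the stop before touching
 * the child's memory:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// wait for the SIGSTOP stop
 *	long word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 */
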
int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        /* May we inspect the given task?
         * This check is used both for attaching with ptrace
         * and for allowing access to sensitive information in /proc.
         *
         * ptrace_attach denies several cases that /proc allows
         * because setting up the necessary parent/child relationship
         * or halting the specified task is impossible.
         */
        int dumpable = 0;
        /* Don't let security modules deny introspection */
        if (task == current)
                return 0;
        if (((current->uid != task->euid) ||
             (current->uid != task->suid) ||
             (current->uid != task->uid) ||
             (current->gid != task->egid) ||
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        smp_rmb();
        if (task->mm)
                dumpable = get_dumpable(task->mm);
        if (!dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace(current, task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
        int err;
        task_lock(task);
        err = __ptrace_may_access(task, mode);
        task_unlock(task);
        return !err;
}

int ptrace_attach(struct task_struct *task)
{
        int retval;
        unsigned long flags;

        audit_ptrace(task);

        retval = -EPERM;
        if (same_thread_group(task, current))
                goto out;

repeat:
        /*
         * Nasty, nasty.
         *
         * We want to hold both the task-lock and the
         * tasklist_lock for writing at the same time.
         * But that's against the rules (tasklist_lock
         * is taken for reading by interrupts on other
         * cpu's that may have task_lock).
         */
        task_lock(task);
        if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                task_unlock(task);
                do {
                        cpu_relax();
                } while (!write_can_lock(&tasklist_lock));
                goto repeat;
        }

        if (!task->mm)
                goto bad;
        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED;
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
        write_unlock_irqrestore(&tasklist_lock, flags);
        task_unlock(task);
out:
        return retval;
}

static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
        child->exit_code = data;
        /* .. re-parent .. */
        __ptrace_unlink(child);
        /* .. and wake it up. */
        if (child->exit_state != EXIT_ZOMBIE)
                wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
        clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

        write_lock_irq(&tasklist_lock);
        /* protect against de_thread()->release_task() */
        if (child->ptrace)
                __ptrace_detach(child, data);
        write_unlock_irq(&tasklist_lock);

        return 0;
}

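/*
 * Illustrative userspace sketch (not part of the original file): the
 * @data argument to PTRACE_DETACH is interpreted like the signal
 * argument to PTRACE_CONT, since ptrace_detach() stores it in
 * ->exit_code before the child is woken:
 *
 *	ptrace(PTRACE_DETACH, pid, NULL, (void *) SIGCONT);
 *
 * Passing 0 detaches without delivering any signal.
 */
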
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *info = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = *info;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}

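/*
 * Userspace sketch (illustrative only, not part of the original
 * file): when the child is in a signal-delivery stop, a tracer can
 * inspect and even rewrite the pending siginfo that ->last_siginfo
 * points at above:
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, NULL, &si);
 *	if (si.si_signo == SIGSEGV)
 *		printf("fault address %p\n", si.si_addr);
 */
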
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif

static int ptrace_resume(struct task_struct *child, long request, long data)
{
        if (!valid_signal(data))
                return -EIO;

        if (request == PTRACE_SYSCALL)
                set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

#ifdef TIF_SYSCALL_EMU
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
                set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
        else
                clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif

        if (is_singleblock(request)) {
                if (unlikely(!arch_has_block_step()))
                        return -EIO;
                user_enable_block_step(child);
        } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
                if (unlikely(!arch_has_single_step()))
                        return -EIO;
                user_enable_single_step(child);
        } else {
                user_disable_single_step(child);
        }

        child->exit_code = data;
        wake_up_process(child);

        return 0;
}

int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;
        siginfo_t siginfo;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                return generic_ptrace_peekdata(child, addr, data);
        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user((siginfo_t __user *) data,
                                                   &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                if (copy_from_user(&siginfo, (siginfo_t __user *) data,
                                   sizeof siginfo))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        case PTRACE_DETACH:	 /* detach a process that was attached. */
                ret = ptrace_detach(child, data);
                break;

#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
#endif
#ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
        case PTRACE_SYSEMU:
        case PTRACE_SYSEMU_SINGLESTEP:
#endif
        case PTRACE_SYSCALL:
        case PTRACE_CONT:
                return ptrace_resume(child, request, data);

        case PTRACE_KILL:
                if (child->exit_state)	/* already dead */
                        return 0;
                return ptrace_resume(child, request, SIGKILL);

        default:
                break;
        }

        return ret;
}

/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
        int ret = -EPERM;

        /*
         * Are we already being traced?
         */
repeat:
        task_lock(current);
        if (!(current->ptrace & PT_PTRACED)) {
                /*
                 * See ptrace_attach() comments about the locking here.
                 */
                unsigned long flags;
                if (!write_trylock_irqsave(&tasklist_lock, flags)) {
                        task_unlock(current);
                        do {
                                cpu_relax();
                        } while (!write_can_lock(&tasklist_lock));
                        goto repeat;
                }

                ret = security_ptrace(current->parent, current,
                                      PTRACE_MODE_ATTACH);

                /*
                 * Set the ptrace bit in the process ptrace flags.
                 * Then link us on our parent's ptraced list.
                 */
                if (!ret) {
                        current->ptrace |= PT_PTRACED;
                        __ptrace_link(current, current->real_parent);
                }

                write_unlock_irqrestore(&tasklist_lock, flags);
        }
        task_unlock(current);
        return ret;
}

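/*
 * Typical use of PTRACE_TRACEME from userspace (an illustrative
 * sketch, not part of the original file): the child declares itself
 * traced before exec, so the parent gets a stop at the subsequent
 * execve():
 *
 *	pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execve(path, argv, envp);	// stops with SIGTRAP
 *	}
 *	waitpid(pid, &status, 0);
 */
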
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        read_lock(&tasklist_lock);
        child = find_task_by_vpid(pid);
        if (child)
                get_task_struct(child);

        read_unlock(&tasklist_lock);
        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                if (!ret)
                        arch_ptrace_attach(current);
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret < 0)
                goto out_put_task_struct;

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}

int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
{
        unsigned long tmp;
        int copied;

        copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
        if (copied != sizeof(tmp))
                return -EIO;
        return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
{
        int copied;

        copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
        return (copied == sizeof(data)) ? 0 : -EIO;
}

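/*
 * Userspace sketch (illustrative only, not part of the original
 * file): glibc's ptrace() wrapper returns the peeked word rather
 * than storing it through @data, so a word-at-a-time read of a
 * remote buffer looks like:
 *
 *	errno = 0;
 *	for (i = 0; i < nwords; i++) {
 *		long w = ptrace(PTRACE_PEEKDATA, pid,
 *				addr + i * sizeof(long), NULL);
 *		if (w == -1 && errno)
 *			break;		// a real error, not a -1 word
 *		buf[i] = w;
 *	}
 */
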
#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
#include <linux/compat.h>

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
                          compat_ulong_t addr, compat_ulong_t data)
{
        compat_ulong_t __user *datap = compat_ptr(data);
        compat_ulong_t word;
        siginfo_t siginfo;
        int ret;

        switch (request) {
        case PTRACE_PEEKTEXT:
        case PTRACE_PEEKDATA:
                ret = access_process_vm(child, addr, &word, sizeof(word), 0);
                if (ret != sizeof(word))
                        ret = -EIO;
                else
                        ret = put_user(word, datap);
                break;

        case PTRACE_POKETEXT:
        case PTRACE_POKEDATA:
                ret = access_process_vm(child, addr, &data, sizeof(data), 1);
                ret = (ret != sizeof(data) ? -EIO : 0);
                break;

        case PTRACE_GETEVENTMSG:
                ret = put_user((compat_ulong_t) child->ptrace_message, datap);
                break;

        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, &siginfo);
                if (!ret)
                        ret = copy_siginfo_to_user32(
                                (struct compat_siginfo __user *) datap,
                                &siginfo);
                break;

        case PTRACE_SETSIGINFO:
                memset(&siginfo, 0, sizeof siginfo);
                if (copy_siginfo_from_user32(
                            &siginfo, (struct compat_siginfo __user *) datap))
                        ret = -EFAULT;
                else
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;

        default:
                ret = ptrace_request(child, request, addr, data);
        }

        return ret;
}

asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                /*
                 * Some architectures need to do book-keeping after
                 * a ptrace attach.
                 */
                if (!ret)
                        arch_ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (!ret)
                ret = compat_arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
#endif	/* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */

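/*
 * Note on the compat path above (illustrative, not part of the
 * original file): a 32-bit tracer on a 64-bit kernel enters through
 * compat_sys_ptrace(), so PEEKDATA/POKEDATA transfer a 32-bit
 * compat_ulong_t rather than the native word. A 32-bit debugger
 * reading remote memory therefore sees exactly 4 bytes per request:
 *
 *	uint32_t w = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 */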