/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
        if (!list_empty(&child->ptrace_list))
                BUG();
        if (child->parent == new_parent)
                return;
        list_add(&child->ptrace_list, &child->parent->ptrace_children);
        REMOVE_LINKS(child);
        child->parent = new_parent;
        SET_LINKS(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
        spin_lock(&child->sighand->siglock);
        if (child->state == TASK_TRACED) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        child->state = TASK_STOPPED;
                } else {
                        signal_wake_up(child, 1);
                }
        }
        if (child->signal->flags & SIGNAL_GROUP_EXIT) {
                sigaddset(&child->pending.signal, SIGKILL);
                signal_wake_up(child, 1);
        }
        spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
        if (!child->ptrace)
                BUG();
        child->ptrace = 0;
        if (!list_empty(&child->ptrace_list)) {
                list_del_init(&child->ptrace_list);
                REMOVE_LINKS(child);
                child->parent = child->real_parent;
                SET_LINKS(child);
        }

        ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that it can only
         * be changed by us, so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current &&
            (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
            && child->signal != NULL) {
                ret = 0;
                spin_lock_irq(&child->sighand->siglock);
                if (child->state == TASK_STOPPED) {
                        child->state = TASK_TRACED;
                } else if (child->state != TASK_TRACED && !kill) {
                        ret = -ESRCH;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill) {
                wait_task_inactive(child);
        }

        /* All systems go.. */
        return ret;
}
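/*
 * Illustrative sketch (not part of this file): ptrace_check_attach() is
 * why a debugger must wait for the tracee to stop before issuing most
 * requests.  A minimal userspace sequence, with pid and addr assumed to
 * be a hypothetical target pid and address, looks roughly like this:
 *
 *      #include <sys/ptrace.h>
 *      #include <sys/wait.h>
 *
 *      int status;
 *      long word;
 *
 *      ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *      waitpid(pid, &status, 0);
 *      word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
 *
 * Without the waitpid(), the tracee may not yet be in TASK_STOPPED or
 * TASK_TRACED, and the check above fails the request with -ESRCH.
 */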
static int may_attach(struct task_struct *task)
{
        if (!task->mm)
                return -EPERM;
        if (((current->uid != task->euid) ||
             (current->uid != task->suid) ||
             (current->uid != task->uid) ||
             (current->gid != task->egid) ||
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        smp_rmb();
        if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace(current, task);
}

int ptrace_may_attach(struct task_struct *task)
{
        int err;
        task_lock(task);
        err = may_attach(task);
        task_unlock(task);
        return !err;
}

int ptrace_attach(struct task_struct *task)
{
        int retval;

        task_lock(task);
        retval = -EPERM;
        if (task->pid <= 1)
                goto bad;
        if (task->tgid == current->tgid)
                goto bad;
        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = may_attach(task);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED | ((task->real_parent != current)
                                      ? PT_ATTACHED : 0);
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;
        task_unlock(task);

        write_lock_irq(&tasklist_lock);
        __ptrace_link(task, current);
        write_unlock_irq(&tasklist_lock);

        force_sig_specific(SIGSTOP, task);
        return 0;

bad:
        task_unlock(task);
        return retval;
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);

        /* .. re-parent .. */
        child->exit_code = data;

        write_lock_irq(&tasklist_lock);
        __ptrace_unlink(child);
        /* .. and wake it up. */
        if (child->exit_state != EXIT_ZOMBIE)
                wake_up_process(child);
        write_unlock_irq(&tasklist_lock);

        return 0;
}

/*
 * Access another process' address space.
 * Source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages().
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        struct page *page;
        void *old_buf = buf;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;

                ret = get_user_pages(tsk, mm, addr, 1,
                                     write, 1, &page, &vma);
                if (ret <= 0)
                        break;

                bytes = len;
                offset = addr & (PAGE_SIZE-1);
                if (bytes > PAGE_SIZE-offset)
                        bytes = PAGE_SIZE-offset;

                maddr = kmap(page);
                if (write) {
                        copy_to_user_page(vma, page, addr,
                                          maddr + offset, buf, bytes);
                        if (!PageCompound(page))
                                set_page_dirty_lock(page);
                } else {
                        copy_from_user_page(vma, page, addr,
                                            buf, maddr + offset, bytes);
                }
                kunmap(page);
                page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        return buf - old_buf;
}
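/*
 * Illustrative sketch (not part of this file): access_process_vm() is
 * the backend for the PTRACE_PEEKDATA/PTRACE_POKEDATA word transfers.
 * An architecture's peek handler is, in essence:
 *
 *      unsigned long tmp;
 *      int copied;
 *
 *      copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
 *      if (copied != sizeof(tmp))
 *              return -EIO;
 *      return put_user(tmp, (unsigned long __user *) data);
 *
 * Partial transfers are reported through the return value, so each
 * caller decides for itself whether a short copy is an error.
 */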
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user *data)
{
        siginfo_t lastinfo;
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        lastinfo = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        if (!error)
                return copy_siginfo_to_user(data, &lastinfo);
        return error;
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user *data)
{
        siginfo_t newinfo;
        int error = -ESRCH;

        if (copy_from_user(&newinfo, data, sizeof(siginfo_t)))
                return -EFAULT;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = newinfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}

int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;

        switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;
        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
                break;
        case PTRACE_SETSIGINFO:
                ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
                break;
        default:
                break;
        }

        return ret;
}
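/*
 * Illustrative sketch (not part of this file): the options handled by
 * ptrace_setoptions() above pair with PTRACE_GETEVENTMSG.  A debugger
 * following forks might do roughly this, with pid assumed to be an
 * already-attached, stopped tracee:
 *
 *      int status;
 *      unsigned long new_pid;
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, NULL, PTRACE_O_TRACEFORK);
 *      ptrace(PTRACE_CONT, pid, NULL, NULL);
 *      waitpid(pid, &status, 0);
 *      ptrace(PTRACE_GETEVENTMSG, pid, NULL, &new_pid);
 *
 * On a fork event stop, ptrace_message holds the new child's pid, which
 * the PTRACE_GETEVENTMSG case above copies out via put_user().
 */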
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
        int ret;

        /*
         * Are we already being traced?
         */
        if (current->ptrace & PT_PTRACED)
                return -EPERM;
        ret = security_ptrace(current->parent, current);
        if (ret)
                return -EPERM;
        /*
         * Set the ptrace bit in the process ptrace flags.
         */
        current->ptrace |= PT_PTRACED;
        return 0;
}

/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid:       process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use by the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        /*
         * Tracing init is not allowed.
         */
        if (pid == 1)
                return ERR_PTR(-EPERM);

        read_lock(&tasklist_lock);
        child = find_task_by_pid(pid);
        if (child)
                get_task_struct(child);
        read_unlock(&tasklist_lock);
        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}

#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
#endif /* __ARCH_SYS_PTRACE */
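/*
 * Illustrative sketch (not part of this file): the canonical userspace
 * counterpart of ptrace_traceme() above is the debuggee half of a
 * fork-and-exec debugger, with "/bin/target" a hypothetical path:
 *
 *      #include <sys/ptrace.h>
 *      #include <unistd.h>
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *              execl("/bin/target", "target", NULL);
 *              _exit(1);
 *      }
 *
 * The child sets PT_PTRACED on itself; the subsequent exec then raises
 * a SIGTRAP, leaving the child stopped for the parent to inspect.
 */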