/*
 * linux/fs/proc/base.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * proc base directory handling functions
 *
 * 1999, Al Viro. Rewritten. Now it covers the whole per-process part.
 * Instead of using magical inumbers to determine the kind of object
 * we allocate and fill in-core inodes upon lookup. They don't even
 * go into icache. We cache the reference to task_struct upon lookup too.
 * Eventually it should become a filesystem in its own. We don't use the
 * rest of procfs anymore.
 *
 *
 * Changelog:
 * 17-Jan-2005
 * Allan Bezerra
 * Bruna Moreira <bruna.moreira@indt.org.br>
 * Edjard Mota <edjard.mota@indt.org.br>
 * Ilias Biris <ilias.biris@indt.org.br>
 * Mauricio Lin <mauricio.lin@indt.org.br>
 *
 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 *
 * A new process specific entry (smaps) included in /proc. It shows the
 * size of rss for each memory area. The maps entry lacks information
 * about physical memory size (rss) for each mapped file, i.e.,
 * rss information for executables and library files.
 * This additional information is useful for any tools that need to know
 * about physical memory consumption for a process specific library.
 *
 * Changelog:
 * 21-Feb-2005
 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT
 * Pud inclusion in the page table walking.
 *
 * ChangeLog:
 * 10-Mar-2005
 * 10LE Instituto Nokia de Tecnologia - INdT:
 * A better way to walk through the page table as suggested by Hugh Dickins.
 *
 * Simo Piiroinen <simo.piiroinen@nokia.com>:
 * Smaps information related to shared, private, clean and dirty pages.
 *
 * Paul Mundt <paul.mundt@nokia.com>:
 * Overall revision about smaps.
48 */ 49 50 #include <linux/uaccess.h> 51 52 #include <linux/errno.h> 53 #include <linux/time.h> 54 #include <linux/proc_fs.h> 55 #include <linux/stat.h> 56 #include <linux/task_io_accounting_ops.h> 57 #include <linux/init.h> 58 #include <linux/capability.h> 59 #include <linux/file.h> 60 #include <linux/fdtable.h> 61 #include <linux/string.h> 62 #include <linux/seq_file.h> 63 #include <linux/namei.h> 64 #include <linux/mnt_namespace.h> 65 #include <linux/mm.h> 66 #include <linux/swap.h> 67 #include <linux/rcupdate.h> 68 #include <linux/kallsyms.h> 69 #include <linux/stacktrace.h> 70 #include <linux/resource.h> 71 #include <linux/module.h> 72 #include <linux/mount.h> 73 #include <linux/security.h> 74 #include <linux/ptrace.h> 75 #include <linux/tracehook.h> 76 #include <linux/printk.h> 77 #include <linux/cgroup.h> 78 #include <linux/cpuset.h> 79 #include <linux/audit.h> 80 #include <linux/poll.h> 81 #include <linux/nsproxy.h> 82 #include <linux/oom.h> 83 #include <linux/elf.h> 84 #include <linux/pid_namespace.h> 85 #include <linux/user_namespace.h> 86 #include <linux/fs_struct.h> 87 #include <linux/slab.h> 88 #include <linux/sched/autogroup.h> 89 #include <linux/sched/mm.h> 90 #include <linux/sched/coredump.h> 91 #include <linux/sched/debug.h> 92 #include <linux/flex_array.h> 93 #include <linux/posix-timers.h> 94 #ifdef CONFIG_HARDWALL 95 #include <asm/hardwall.h> 96 #endif 97 #include <trace/events/oom.h> 98 #include "internal.h" 99 #include "fd.h" 100 101 /* NOTE: 102 * Implementing inode permission operations in /proc is almost 103 * certainly an error. Permission checks need to happen during 104 * each system call not at open time. The reason is that most of 105 * what we wish to check for permissions in /proc varies at runtime. 106 * 107 * The classic example of a problem is opening file descriptors 108 * in /proc for a task before it execs a suid executable. 
 */

static u8 nlink_tid;
static u8 nlink_tgid;

/*
 * One table entry per /proc/<pid>/<name> file: name, mode, and the
 * inode/file operations (or show callback) used to instantiate it.
 */
struct pid_entry {
	const char *name;
	unsigned int len;
	umode_t mode;
	const struct inode_operations *iop;
	const struct file_operations *fop;
	union proc_op op;
};

#define NOD(NAME, MODE, IOP, FOP, OP) {	\
	.name = (NAME),			\
	.len = sizeof(NAME) - 1,	\
	.mode = MODE,			\
	.iop = IOP,			\
	.fop = FOP,			\
	.op = OP,			\
}

#define DIR(NAME, MODE, iops, fops)	\
	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
#define LNK(NAME, get_link)	\
	NOD(NAME, (S_IFLNK|S_IRWXUGO),	\
		&proc_pid_link_inode_operations, NULL,	\
		{ .proc_get_link = get_link } )
#define REG(NAME, MODE, fops)	\
	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
#define ONE(NAME, MODE, show)	\
	NOD(NAME, (S_IFREG|(MODE)),	\
		NULL, &proc_single_file_operations,	\
		{ .proc_show = show } )

/*
 * Count the number of hardlinks for the pid_entry table, excluding the .
 * and .. links.
 */
static unsigned int __init pid_entry_nlink(const struct pid_entry *entries,
	unsigned int n)
{
	unsigned int i;
	unsigned int count;

	/* Start at 2 for the "." and ".." links of the directory itself. */
	count = 2;
	for (i = 0; i < n; ++i) {
		if (S_ISDIR(entries[i].mode))
			++count;
	}

	return count;
}

/* Take a reference to @task's fs root into @root; -ENOENT if it has no fs. */
static int get_task_root(struct task_struct *task, struct path *root)
{
	int result = -ENOENT;

	task_lock(task);
	if (task->fs) {
		get_fs_root(task->fs, root);
		result = 0;
	}
	task_unlock(task);
	return result;
}

/* /proc/<pid>/cwd: resolve the link to the task's working directory. */
static int proc_cwd_link(struct dentry *dentry, struct path *path)
{
	struct task_struct *task = get_proc_task(d_inode(dentry));
	int result = -ENOENT;

	if (task) {
		task_lock(task);
		if (task->fs) {
			get_fs_pwd(task->fs, path);
			result = 0;
		}
		task_unlock(task);
		put_task_struct(task);
	}
	return result;
}

/* /proc/<pid>/root: resolve the link to the task's fs root. */
static int proc_root_link(struct dentry *dentry, struct path *path)
{
	struct task_struct *task = get_proc_task(d_inode(dentry));
	int result = -ENOENT;

	if (task) {
		result = get_task_root(task, path);
		put_task_struct(task);
	}
	return result;
}

/*
 * /proc/<pid>/cmdline: copy the target's argv out of its address space.
 * If the last byte of ARGV is not NUL, the command line is treated as a
 * single string that may extend into the environment area.
 */
static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
				     size_t _count, loff_t *pos)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	char *page;
	unsigned long count = _count;
	unsigned long arg_start, arg_end, env_start, env_end;
	unsigned long len1, len2, len;
	unsigned long p;
	char c;
	ssize_t rv;

	BUG_ON(*pos < 0);

	tsk = get_proc_task(file_inode(file));
	if (!tsk)
		return -ESRCH;
	mm = get_task_mm(tsk);
	put_task_struct(tsk);
	if (!mm)
		return 0;
	/* Check if process spawned far enough to have cmdline. */
	if (!mm->env_end) {
		rv = 0;
		goto out_mmput;
	}

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page) {
		rv = -ENOMEM;
		goto out_mmput;
	}

	/* Snapshot the argv/env boundaries under mmap_sem. */
	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	BUG_ON(arg_start > arg_end);
	BUG_ON(env_start > env_end);

	len1 = arg_end - arg_start;
	len2 = env_end - env_start;

	/* Empty ARGV. */
	if (len1 == 0) {
		rv = 0;
		goto out_free_page;
	}
	/*
	 * Inherently racy -- command line shares address space
	 * with code and data.
	 */
	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
	if (rv <= 0)
		goto out_free_page;

	rv = 0;

	if (c == '\0') {
		/* Command line (set of strings) occupies whole ARGV. */
		if (len1 <= *pos)
			goto out_free_page;

		p = arg_start + *pos;
		len = len1 - *pos;
		while (count > 0 && len > 0) {
			unsigned int _count;
			int nr_read;

			_count = min3(count, len, PAGE_SIZE);
			nr_read = access_remote_vm(mm, p, page, _count, 0);
			if (nr_read < 0)
				rv = nr_read;
			if (nr_read <= 0)
				goto out_free_page;

			if (copy_to_user(buf, page, nr_read)) {
				rv = -EFAULT;
				goto out_free_page;
			}

			p += nr_read;
			len -= nr_read;
			buf += nr_read;
			count -= nr_read;
			rv += nr_read;
		}
	} else {
		/*
		 * Command line (1 string) occupies ARGV and
		 * extends into ENVP.
		 */
		struct {
			unsigned long p;
			unsigned long len;
		} cmdline[2] = {
			{ .p = arg_start, .len = len1 },
			{ .p = env_start, .len = len2 },
		};
		loff_t pos1 = *pos;
		unsigned int i;

		/* Skip whole areas that lie entirely before *pos. */
		i = 0;
		while (i < 2 && pos1 >= cmdline[i].len) {
			pos1 -= cmdline[i].len;
			i++;
		}
		while (i < 2) {
			p = cmdline[i].p + pos1;
			len = cmdline[i].len - pos1;
			while (count > 0 && len > 0) {
				unsigned int _count, l;
				int nr_read;
				bool final;

				_count = min3(count, len, PAGE_SIZE);
				nr_read = access_remote_vm(mm, p, page, _count, 0);
				if (nr_read < 0)
					rv = nr_read;
				if (nr_read <= 0)
					goto out_free_page;

				/*
				 * Command line can be shorter than whole ARGV
				 * even if last "marker" byte says it is not.
				 */
				final = false;
				l = strnlen(page, nr_read);
				if (l < nr_read) {
					nr_read = l;
					final = true;
				}

				if (copy_to_user(buf, page, nr_read)) {
					rv = -EFAULT;
					goto out_free_page;
				}

				p += nr_read;
				len -= nr_read;
				buf += nr_read;
				count -= nr_read;
				rv += nr_read;

				if (final)
					goto out_free_page;
			}

			/* Only first chunk can be read partially. */
			pos1 = 0;
			i++;
		}
	}

out_free_page:
	free_page((unsigned long)page);
out_mmput:
	mmput(mm);
	if (rv > 0)
		*pos += rv;
	return rv;
}

static const struct file_operations proc_pid_cmdline_ops = {
	.read	= proc_pid_cmdline_read,
	.llseek	= generic_file_llseek,
};

#ifdef CONFIG_KALLSYMS
/*
 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 * Returns the resolved symbol. If that fails, simply return the address.
 */
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	unsigned long wchan;
	char symname[KSYM_NAME_LEN];

	wchan = get_wchan(task);

	/* Only symbolize for callers allowed to ptrace-read the task. */
	if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
			&& !lookup_symbol_name(wchan, symname))
		seq_printf(m, "%s", symname);
	else
		seq_putc(m, '0');

	return 0;
}
#endif /* CONFIG_KALLSYMS */

/*
 * Take cred_guard_mutex and verify ptrace-attach permission on @task;
 * pairs with unlock_trace().
 */
static int lock_trace(struct task_struct *task)
{
	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (err)
		return err;
	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
		mutex_unlock(&task->signal->cred_guard_mutex);
		return -EPERM;
	}
	return 0;
}

static void unlock_trace(struct task_struct *task)
{
	mutex_unlock(&task->signal->cred_guard_mutex);
}

#ifdef CONFIG_STACKTRACE

#define MAX_STACK_TRACE_DEPTH	64

/* /proc/<pid>/stack: dump the task's kernel stack, one frame per line. */
static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	struct stack_trace trace;
	unsigned long *entries;
	int err;
	int i;

	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_TRACE_DEPTH;
	trace.entries = entries;
	trace.skip = 0;

	err = lock_trace(task);
	if (!err) {
		save_stack_trace_tsk(task, &trace);

		for (i = 0; i < trace.nr_entries; i++) {
			seq_printf(m, "[<%pK>] %pB\n",
				   (void *)entries[i], (void *)entries[i]);
		}
		unlock_trace(task);
	}
	kfree(entries);

	return err;
}
#endif

#ifdef CONFIG_SCHED_INFO
/*
 * Provides /proc/PID/schedstat
 */
static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
			      struct pid *pid, struct task_struct *task)
{
	if (unlikely(!sched_info_on()))
		seq_printf(m, "0 0 0\n");
	else
		seq_printf(m, "%llu %llu %lu\n",
			   (unsigned long long)task->se.sum_exec_runtime,
			   (unsigned long long)task->sched_info.run_delay,
			   task->sched_info.pcount);

	return 0;
}
#endif

#ifdef CONFIG_LATENCYTOP
/* seq_file show: print the task's latency_record table. */
static int lstats_show_proc(struct seq_file *m, void *v)
{
	int i;
	struct inode *inode = m->private;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;
	seq_puts(m, "Latency Top version : v0.1\n");
	for (i = 0; i < 32; i++) {
		struct latency_record *lr = &task->latency_record[i];
		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %li %li",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];
				if (!bt)
					break;
				if (bt == ULONG_MAX)
					break;
				seq_printf(m, " %ps", (void *)bt);
			}
			seq_putc(m, '\n');
		}

	}
	put_task_struct(task);
	return 0;
}

static int lstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, lstats_show_proc, inode);
}

/* Any write clears the task's latency tracing records. */
static ssize_t lstats_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *offs)
{
	struct task_struct *task = get_proc_task(file_inode(file));

	if (!task)
		return -ESRCH;
	clear_all_latency_tracing(task);
	put_task_struct(task);

	return count;
}

static const struct file_operations proc_lstats_operations = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif

/* /proc/<pid>/oom_score: oom_badness() heuristic scaled to 0..1000. */
static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
			  struct pid *pid, struct task_struct *task)
{
	unsigned long totalpages = totalram_pages + total_swap_pages;
	unsigned long points = 0;

	points = oom_badness(task, NULL, NULL, totalpages) *
					1000 / totalpages;

	seq_printf(m, "%lu\n", points);

	return 0;
}

/* Human-readable name and unit for each RLIMIT_* slot. */
struct limit_names {
	const char *name;
	const char *unit;
};

static const struct limit_names lnames[RLIM_NLIMITS] = {
	[RLIMIT_CPU] = {"Max cpu time", "seconds"},
	[RLIMIT_FSIZE] = {"Max file size", "bytes"},
	[RLIMIT_DATA] = {"Max data size", "bytes"},
	[RLIMIT_STACK] = {"Max stack size", "bytes"},
	[RLIMIT_CORE] = {"Max core file size", "bytes"},
	[RLIMIT_RSS] = {"Max resident set", "bytes"},
	[RLIMIT_NPROC] = {"Max processes", "processes"},
	[RLIMIT_NOFILE] = {"Max open files", "files"},
	[RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"},
	[RLIMIT_AS] = {"Max address space", "bytes"},
	[RLIMIT_LOCKS] = {"Max file locks", "locks"},
	[RLIMIT_SIGPENDING] = {"Max pending signals", "signals"},
	[RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"},
	[RLIMIT_NICE] = {"Max nice priority", NULL},
	[RLIMIT_RTPRIO] = {"Max realtime priority", NULL},
	[RLIMIT_RTTIME] = {"Max realtime timeout", "us"},
};

/* Display limits for a process */
static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
			   struct pid *pid, struct task_struct *task)
{
	unsigned int i;
	unsigned long flags;

	struct rlimit rlim[RLIM_NLIMITS];

	/* Snapshot the limits under siglock so the printed table is consistent. */
	if (!lock_task_sighand(task, &flags))
		return 0;
	memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
	unlock_task_sighand(task, &flags);

	/*
	 * print the file header
	 */
	seq_printf(m, "%-25s %-20s %-20s %-10s\n",
		   "Limit", "Soft Limit", "Hard Limit", "Units");

	for (i = 0; i < RLIM_NLIMITS; i++) {
		if (rlim[i].rlim_cur == RLIM_INFINITY)
			seq_printf(m, "%-25s %-20s ",
				   lnames[i].name, "unlimited");
		else
			seq_printf(m, "%-25s %-20lu ",
				   lnames[i].name, rlim[i].rlim_cur);

		if (rlim[i].rlim_max == RLIM_INFINITY)
			seq_printf(m, "%-20s ", "unlimited");
		else
			seq_printf(m, "%-20lu ",
				   rlim[i].rlim_max);

		if (lnames[i].unit)
			seq_printf(m, "%-10s\n", lnames[i].unit);
		else
			seq_putc(m, '\n');
	}

	return 0;
}

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
/* /proc/<pid>/syscall: current syscall number, arguments, sp and pc. */
static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *task)
{
	long nr;
	unsigned long args[6], sp, pc;
	int res;

	res = lock_trace(task);
	if (res)
		return res;

	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
		seq_puts(m, "running\n");
	else if (nr < 0)
		seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
	else
		seq_printf(m,
			   "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
			   nr,
			   args[0], args[1], args[2], args[3], args[4], args[5],
			   sp, pc);
	unlock_trace(task);

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */

/************************************************************************/
/* Here the fs part begins */
/************************************************************************/

/* permission checks */
static int proc_fd_access_allowed(struct inode *inode)
{
	struct task_struct *task;
	int allowed = 0;
	/* Allow access to a task's file descriptors if it is us or we
	 * may use ptrace attach to the process and find out that
	 * information.
	 */
	task = get_proc_task(inode);
	if (task) {
		allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
		put_task_struct(task);
	}
	return allowed;
}

/* setattr for /proc pid files: mode changes are refused outright. */
int proc_setattr(struct dentry *dentry, struct iattr *attr)
{
	int error;
	struct inode *inode = d_inode(dentry);

	if (attr->ia_valid & ATTR_MODE)
		return -EPERM;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

/*
 * May current process learn task's sched/cmdline info (for hide_pid_min=1)
 * or euid/egid (for hide_pid_min=2)?
 */
static bool has_pid_permissions(struct pid_namespace *pid,
				struct task_struct *task,
				int hide_pid_min)
{
	if (pid->hide_pid < hide_pid_min)
		return true;
	if (in_group_p(pid->pid_gid))
		return true;
	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}


static int proc_pid_permission(struct inode *inode, int mask)
{
	struct pid_namespace *pid = inode->i_sb->s_fs_info;
	struct task_struct *task;
	bool has_perms;

	task = get_proc_task(inode);
	if (!task)
		return -ESRCH;
	has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
	put_task_struct(task);

	if (!has_perms) {
		if (pid->hide_pid == HIDEPID_INVISIBLE) {
			/*
			 * Let's make getdents(), stat(), and open()
			 * consistent with each other. If a process
			 * may not stat() a file, it shouldn't be seen
			 * in procfs at all.
			 */
			return -ENOENT;
		}

		return -EPERM;
	}
	return generic_permission(inode, mask);
}



static const struct inode_operations proc_def_inode_operations = {
	.setattr	= proc_setattr,
};

/* Common show routine for ONE() entries: look up task, call ->proc_show. */
static int proc_single_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct pid_namespace *ns;
	struct pid *pid;
	struct task_struct *task;
	int ret;

	ns = inode->i_sb->s_fs_info;
	pid = proc_pid(inode);
	task = get_pid_task(pid, PIDTYPE_PID);
	if (!task)
		return -ESRCH;

	ret = PROC_I(inode)->op.proc_show(m, ns, pid, task);

	put_task_struct(task);
	return ret;
}

static int proc_single_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, proc_single_show, inode);
}

static const struct file_operations proc_single_file_operations = {
	.open		= proc_single_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Return the target task's mm after an mm_access() check in @mode,
 * holding an mm_count (not mm_users) reference: the struct stays
 * around but its address space is not pinned.
 */
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
{
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = ERR_PTR(-ESRCH);

	if (task) {
		mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
		put_task_struct(task);

		if (!IS_ERR_OR_NULL(mm)) {
			/* ensure this mm_struct can't be freed */
			mmgrab(mm);
			/* but do not pin its memory */
			mmput(mm);
		}
	}

	return mm;
}

/* Stash the checked mm in file->private_data for later reads/writes. */
static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
{
	struct mm_struct *mm = proc_mem_open(inode, mode);

	if (IS_ERR(mm))
		return PTR_ERR(mm);

	file->private_data = mm;
	return 0;
}

static int mem_open(struct inode *inode, struct file *file)
{
	int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);

	/* OK to pass negative loff_t, we can catch out-of-range */
	file->f_mode |= FMODE_UNSIGNED_OFFSET;

	return ret;
}

/*
 * Common engine for /proc/<pid>/mem: copy @count bytes between userspace
 * and the target mm at *ppos, one bounce page at a time, via
 * access_remote_vm().
 */
static ssize_t mem_rw(struct file *file, char __user *buf,
		      size_t count, loff_t *ppos, int write)
{
	struct mm_struct *mm = file->private_data;
	unsigned long addr = *ppos;
	ssize_t copied;
	char *page;
	unsigned int flags;

	if (!mm)
		return 0;

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;

	copied = 0;
	/* Pin the address space for the duration of the copy loop. */
	if (!mmget_not_zero(mm))
		goto free;

	/* Maybe we should limit FOLL_FORCE to actual ptrace users? */
	flags = FOLL_FORCE;
	if (write)
		flags |= FOLL_WRITE;

	while (count > 0) {
		int this_len = min_t(int, count, PAGE_SIZE);

		if (write && copy_from_user(page, buf, this_len)) {
			copied = -EFAULT;
			break;
		}

		this_len = access_remote_vm(mm, addr, page, this_len, flags);
		if (!this_len) {
			if (!copied)
				copied = -EIO;
			break;
		}

		if (!write && copy_to_user(buf, page, this_len)) {
			copied = -EFAULT;
			break;
		}

		buf += this_len;
		addr += this_len;
		copied += this_len;
		count -= this_len;
	}
	*ppos = addr;

	mmput(mm);
free:
	free_page((unsigned long) page);
	return copied;
}

static ssize_t mem_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	return mem_rw(file, buf, count, ppos, 0);
}

static ssize_t mem_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	return mem_rw(file, (char __user*)buf, count, ppos, 1);
}

/* lseek for /proc files addressed by absolute position; no bounds check. */
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case 0:
		file->f_pos = offset;
		break;
	case 1:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	force_successful_syscall_return();
	return file->f_pos;
}

/* Drop the mm_count reference taken by proc_mem_open(). */
static int mem_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;
	if (mm)
		mmdrop(mm);
	return 0;
}

static const struct file_operations proc_mem_operations = {
	.llseek		= mem_lseek,
	.read		= mem_read,
	.write		= mem_write,
	.open		= mem_open,
	.release	= mem_release,
};

static int environ_open(struct inode *inode, struct file *file)
{
	return __mem_open(inode, file, PTRACE_MODE_READ);
}

/* /proc/<pid>/environ: copy the environment block out of the target mm. */
static ssize_t environ_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char *page;
	unsigned long src = *ppos;
	int ret = 0;
	struct mm_struct *mm = file->private_data;
	unsigned long env_start, env_end;

	/* Ensure the process spawned far enough to have an environment. */
	if (!mm || !mm->env_end)
		return 0;

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;

	ret = 0;
	if (!mmget_not_zero(mm))
		goto free;

	/* Snapshot the environment boundaries under mmap_sem. */
	down_read(&mm->mmap_sem);
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	while (count > 0) {
		size_t this_len, max_len;
		int retval;

		if (src >= (env_end - env_start))
			break;

		this_len = env_end - (env_start + src);

		max_len = min_t(size_t, PAGE_SIZE, count);
		this_len = min(max_len, this_len);

		retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);

		if (retval <= 0) {
			ret = retval;
			break;
		}

		if (copy_to_user(buf, page, retval)) {
			ret = -EFAULT;
			break;
		}

		ret += retval;
		src += retval;
		buf += retval;
		count -= retval;
	}
	*ppos = src;
	mmput(mm);

free:
	free_page((unsigned long) page);
	return ret;
}

static const struct file_operations proc_environ_operations = {
	.open		= environ_open,
	.read		= environ_read,
	.llseek		= generic_file_llseek,
	.release	= mem_release,
};

static int auxv_open(struct inode *inode, struct file *file)
{
	return __mem_open(inode, file, PTRACE_MODE_READ_FSCREDS);
}

/* /proc/<pid>/auxv: dump saved_auxv up to and including the AT_NULL pair. */
static ssize_t auxv_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	unsigned int nwords = 0;

	if (!mm)
		return 0;
	do {
		nwords += 2;
	} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
	return simple_read_from_buffer(buf, count, ppos, mm->saved_auxv,
				       nwords * sizeof(mm->saved_auxv[0]));
}

static const struct file_operations proc_auxv_operations = {
	.open		= auxv_open,
	.read		= auxv_read,
	.llseek		= generic_file_llseek,
	.release	= mem_release,
};

/* /proc/<pid>/oom_adj (legacy): map oom_score_adj back to the old scale. */
static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	int oom_adj = OOM_ADJUST_MIN;
	size_t len;

	if (!task)
		return -ESRCH;
	if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
		oom_adj = OOM_ADJUST_MAX;
	else
		oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
			  OOM_SCORE_ADJ_MAX;
	put_task_struct(task);
	len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}

/*
 * Set the target's oom_score_adj and, when the mm is shared with other
 * processes, propagate the value to them too; @legacy means the value
 * came in via the deprecated oom_adj file.
 */
static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
{
	static DEFINE_MUTEX(oom_adj_mutex);
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	int err = 0;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;

	mutex_lock(&oom_adj_mutex);
	if (legacy) {
		if (oom_adj < task->signal->oom_score_adj &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			goto err_unlock;
		}
		/*
		 * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
		 * /proc/pid/oom_score_adj instead.
		 */
		pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
			     current->comm, task_pid_nr(current), task_pid_nr(task),
			     task_pid_nr(task));
	} else {
		if ((short)oom_adj < task->signal->oom_score_adj_min &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			goto err_unlock;
		}
	}

	/*
	 * Make sure we will check other processes sharing the mm if this is
	 * not vfork which wants its own oom_score_adj.
	 * pin the mm so it doesn't go away and get reused after task_unlock
	 */
	if (!task->vfork_done) {
		struct task_struct *p = find_lock_task_mm(task);

		if (p) {
			if (atomic_read(&p->mm->mm_users) > 1) {
				mm = p->mm;
				mmgrab(mm);
			}
			task_unlock(p);
		}
	}

	task->signal->oom_score_adj = oom_adj;
	if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
		task->signal->oom_score_adj_min = (short)oom_adj;
	trace_oom_score_adj_update(task);

	if (mm) {
		struct task_struct *p;

		/* Propagate to every other process sharing this mm. */
		rcu_read_lock();
		for_each_process(p) {
			if (same_thread_group(task, p))
				continue;

			/* do not touch kernel threads or the global init */
			if (p->flags & PF_KTHREAD || is_global_init(p))
				continue;

			task_lock(p);
			if (!p->vfork_done && process_shares_mm(p, mm)) {
				pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
					task_pid_nr(p), p->comm,
					p->signal->oom_score_adj, oom_adj,
					task_pid_nr(task), task->comm);
				p->signal->oom_score_adj = oom_adj;
				if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
					p->signal->oom_score_adj_min = (short)oom_adj;
			}
			task_unlock(p);
		}
		rcu_read_unlock();
		mmdrop(mm);
	}
err_unlock:
	mutex_unlock(&oom_adj_mutex);
	put_task_struct(task);
	return err;
}

/*
 * /proc/pid/oom_adj exists solely for backwards compatibility with previous
 * kernels. The effective policy is defined by oom_score_adj, which has a
 * different scale: oom_adj grew exponentially and oom_score_adj grows linearly.
 * Values written to oom_adj are simply mapped linearly to oom_score_adj.
 * Processes that become oom disabled via oom_adj will still be oom disabled
 * with this implementation.
 *
 * oom_adj cannot be removed since existing userspace binaries use it.
 */
static ssize_t oom_adj_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	char buffer[PROC_NUMBUF];
	int oom_adj;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &oom_adj);
	if (err)
		goto out;
	if ((oom_adj < OOM_ADJUST_MIN || oom_adj > OOM_ADJUST_MAX) &&
	    oom_adj != OOM_DISABLE) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
	 * value is always attainable.
	 */
	if (oom_adj == OOM_ADJUST_MAX)
		oom_adj = OOM_SCORE_ADJ_MAX;
	else
		oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;

	err = __set_oom_adj(file, oom_adj, true);
out:
	return err < 0 ? err : count;
}

static const struct file_operations proc_oom_adj_operations = {
	.read		= oom_adj_read,
	.write		= oom_adj_write,
	.llseek		= generic_file_llseek,
};

/* /proc/<pid>/oom_score_adj: read the current value. */
static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	short oom_score_adj = OOM_SCORE_ADJ_MIN;
	size_t len;

	if (!task)
		return -ESRCH;
	oom_score_adj = task->signal->oom_score_adj;
	put_task_struct(task);
	len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}

/* /proc/<pid>/oom_score_adj: parse and apply a new value. */
static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	char buffer[PROC_NUMBUF];
	int oom_score_adj;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count)) {
		err = -EFAULT;
		goto out;
	}

	err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
	if (err)
		goto out;
	if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
	    oom_score_adj > OOM_SCORE_ADJ_MAX) {
		err = -EINVAL;
		goto out;
	}

	err = __set_oom_adj(file, oom_score_adj, false);
out:
	return err < 0 ?
err : count; 1212 } 1213 1214 static const struct file_operations proc_oom_score_adj_operations = { 1215 .read = oom_score_adj_read, 1216 .write = oom_score_adj_write, 1217 .llseek = default_llseek, 1218 }; 1219 1220 #ifdef CONFIG_AUDITSYSCALL 1221 #define TMPBUFLEN 11 1222 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, 1223 size_t count, loff_t *ppos) 1224 { 1225 struct inode * inode = file_inode(file); 1226 struct task_struct *task = get_proc_task(inode); 1227 ssize_t length; 1228 char tmpbuf[TMPBUFLEN]; 1229 1230 if (!task) 1231 return -ESRCH; 1232 length = scnprintf(tmpbuf, TMPBUFLEN, "%u", 1233 from_kuid(file->f_cred->user_ns, 1234 audit_get_loginuid(task))); 1235 put_task_struct(task); 1236 return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); 1237 } 1238 1239 static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, 1240 size_t count, loff_t *ppos) 1241 { 1242 struct inode * inode = file_inode(file); 1243 uid_t loginuid; 1244 kuid_t kloginuid; 1245 int rv; 1246 1247 rcu_read_lock(); 1248 if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { 1249 rcu_read_unlock(); 1250 return -EPERM; 1251 } 1252 rcu_read_unlock(); 1253 1254 if (*ppos != 0) { 1255 /* No partial writes. */ 1256 return -EINVAL; 1257 } 1258 1259 rv = kstrtou32_from_user(buf, count, 10, &loginuid); 1260 if (rv < 0) 1261 return rv; 1262 1263 /* is userspace tring to explicitly UNSET the loginuid? 
	 */
	if (loginuid == AUDIT_UID_UNSET) {
		kloginuid = INVALID_UID;
	} else {
		kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
		if (!uid_valid(kloginuid))
			return -EINVAL;
	}

	rv = audit_set_loginuid(kloginuid);
	if (rv < 0)
		return rv;
	return count;
}

static const struct file_operations proc_loginuid_operations = {
	.read		= proc_loginuid_read,
	.write		= proc_loginuid_write,
	.llseek		= generic_file_llseek,
};

/* Report the audit session id of the task (read-only). */
static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
				  size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	struct task_struct *task = get_proc_task(inode);
	ssize_t length;
	char tmpbuf[TMPBUFLEN];

	if (!task)
		return -ESRCH;
	length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
			   audit_get_sessionid(task));
	put_task_struct(task);
	return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}

static const struct file_operations proc_sessionid_operations = {
	.read		= proc_sessionid_read,
	.llseek		= generic_file_llseek,
};
#endif

#ifdef CONFIG_FAULT_INJECTION
/* Report the task's make_it_fail fault-injection flag. */
static ssize_t proc_fault_inject_read(struct file * file, char __user * buf,
				      size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	char buffer[PROC_NUMBUF];
	size_t len;
	int make_it_fail;

	if (!task)
		return -ESRCH;
	make_it_fail = task->make_it_fail;
	put_task_struct(task);

	len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail);

	return simple_read_from_buffer(buf, count, ppos, buffer, len);
}

/*
 * Set the task's fault-injection flag (0 or 1).  Requires
 * CAP_SYS_RESOURCE since it can make arbitrary allocations fail.
 */
static ssize_t proc_fault_inject_write(struct file * file,
			const char __user * buf, size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	int make_it_fail;
	int rv;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;
	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 0, &make_it_fail);
	if (rv < 0)
		return rv;
	if (make_it_fail < 0 || make_it_fail > 1)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	task->make_it_fail = make_it_fail;
	put_task_struct(task);

	return count;
}

static const struct file_operations proc_fault_inject_operations = {
	.read		= proc_fault_inject_read,
	.write		= proc_fault_inject_write,
	.llseek		= generic_file_llseek,
};
#endif


#ifdef CONFIG_SCHED_DEBUG
/*
 * Print out various scheduling related per-task fields:
 */
static int sched_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_show_task(p, m);

	put_task_struct(p);

	return 0;
}

/* Any write resets the task's scheduler statistics; data is ignored. */
static ssize_t
sched_write(struct file *file, const char __user *buf,
	    size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_set_task(p);

	put_task_struct(p);

	return count;
}

static int sched_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_show, inode);
}

static const struct file_operations proc_pid_sched_operations = {
	.open		= sched_open,
	.read		= seq_read,
	.write		= sched_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif

#ifdef CONFIG_SCHED_AUTOGROUP
/*
 * Print out autogroup related
 * information:
 */
static int sched_autogroup_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_autogroup_show_task(p, m);

	put_task_struct(p);

	return 0;
}

/* Parse a nice value and apply it to the task's autogroup. */
static ssize_t
sched_autogroup_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	char buffer[PROC_NUMBUF];
	int nice;
	int err;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;

	err = kstrtoint(strstrip(buffer), 0, &nice);
	if (err < 0)
		return err;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	err = proc_sched_autogroup_set_nice(p, nice);
	if (err)
		count = err;

	put_task_struct(p);

	return count;
}

static int sched_autogroup_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = single_open(filp, sched_autogroup_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;

		m->private = inode;
	}
	return ret;
}

static const struct file_operations proc_pid_sched_autogroup_operations = {
	.open		= sched_autogroup_open,
	.read		= seq_read,
	.write		= sched_autogroup_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_SCHED_AUTOGROUP */

/*
 * Rename the task (/proc/pid/comm).  Only threads in the same thread
 * group as the writer may be renamed; input is silently truncated to
 * TASK_COMM_LEN - 1 bytes.
 */
static ssize_t comm_write(struct file *file, const char __user *buf,
				size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	char buffer[TASK_COMM_LEN];
	const size_t maxlen = sizeof(buffer) - 1;

	memset(buffer, 0, sizeof(buffer));
	if (copy_from_user(buffer, buf, count > maxlen ? maxlen : count))
		return -EFAULT;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (same_thread_group(current, p))
		set_task_comm(p, buffer);
	else
		count = -EINVAL;

	put_task_struct(p);

	return count;
}

/* Show the task's comm under task_lock() so it cannot change mid-read. */
static int comm_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	task_lock(p);
	seq_printf(m, "%s\n", p->comm);
	task_unlock(p);

	put_task_struct(p);

	return 0;
}

static int comm_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, comm_show, inode);
}

static const struct file_operations proc_pid_set_comm_operations = {
	.open		= comm_open,
	.read		= seq_read,
	.write		= comm_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Resolve /proc/pid/exe: take a reference on the task's exe file and
 * hand back its path.  Returns -ENOENT if the task or its exe is gone.
 */
static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
{
	struct task_struct *task;
	struct file *exe_file;

	task = get_proc_task(d_inode(dentry));
	if (!task)
		return -ENOENT;
	exe_file = get_task_exe_file(task);
	put_task_struct(task);
	if (exe_file) {
		*exe_path = exe_file->f_path;
		path_get(&exe_file->f_path);
		fput(exe_file);
		return 0;
	} else
		return -ENOENT;
}

static const char *proc_pid_get_link(struct dentry *dentry,
				     struct inode *inode,
				     struct delayed_call *done)
{
	struct path path;
	int error = -EACCES;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	/* Are we allowed to snoop on the tasks file descriptors?
	 */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
	if (error)
		goto out;

	nd_jump_link(&path);
	return NULL;
out:
	return ERR_PTR(error);
}

/*
 * Copy the textual pathname of *path into the user buffer.  Note that
 * on success at most buflen bytes are copied and the result is NOT
 * NUL-terminated, matching readlink(2) semantics.
 */
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
	char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
	char *pathname;
	int len;

	if (!tmp)
		return -ENOMEM;

	pathname = d_path(path, tmp, PAGE_SIZE);
	len = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;
	len = tmp + PAGE_SIZE - 1 - pathname;

	if (len > buflen)
		len = buflen;
	if (copy_to_user(buffer, pathname, len))
		len = -EFAULT;
 out:
	free_page((unsigned long)tmp);
	return len;
}

static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
{
	int error = -EACCES;
	struct inode *inode = d_inode(dentry);
	struct path path;

	/* Are we allowed to snoop on the tasks file descriptors? */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(dentry, &path);
	if (error)
		goto out;

	error = do_proc_readlink(&path, buffer, buflen);
	path_put(&path);
out:
	return error;
}

const struct inode_operations proc_pid_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.get_link	= proc_pid_get_link,
	.setattr	= proc_setattr,
};


/* building an inode */

void task_dump_owner(struct task_struct *task, mode_t mode,
		     kuid_t *ruid, kgid_t *rgid)
{
	/* Depending on the state of dumpable compute who should own a
	 * proc file for a task.
	 */
	const struct cred *cred;
	kuid_t uid;
	kgid_t gid;

	/* Default to the tasks effective ownership */
	rcu_read_lock();
	cred = __task_cred(task);
	uid = cred->euid;
	gid = cred->egid;
	rcu_read_unlock();

	/*
	 * Before the /proc/pid/status file was created the only way to read
	 * the effective uid of a /process was to stat /proc/pid.  Reading
	 * /proc/pid/status is slow enough that procps and other packages
	 * kept stating /proc/pid.  To keep the rules in /proc simple I have
	 * made this apply to all per process world readable and executable
	 * directories.
	 */
	if (mode != (S_IFDIR|S_IRUGO|S_IXUGO)) {
		struct mm_struct *mm;
		task_lock(task);
		mm = task->mm;
		/* Make non-dumpable tasks owned by some root */
		if (mm) {
			if (get_dumpable(mm) != SUID_DUMP_USER) {
				struct user_namespace *user_ns = mm->user_ns;

				uid = make_kuid(user_ns, 0);
				if (!uid_valid(uid))
					uid = GLOBAL_ROOT_UID;

				gid = make_kgid(user_ns, 0);
				if (!gid_valid(gid))
					gid = GLOBAL_ROOT_GID;
			}
		} else {
			uid = GLOBAL_ROOT_UID;
			gid = GLOBAL_ROOT_GID;
		}
		task_unlock(task);
	}
	*ruid = uid;
	*rgid = gid;
}

/*
 * Allocate and initialise a per-task proc inode of the given mode,
 * pinning a reference to the task's struct pid in the proc_inode.
 * Returns NULL on allocation or pid-grab failure.
 */
struct inode *proc_pid_make_inode(struct super_block * sb,
				  struct task_struct *task, umode_t mode)
{
	struct inode * inode;
	struct proc_inode *ei;

	/* We need a new inode */

	inode = new_inode(sb);
	if (!inode)
		goto out;

	/* Common stuff */
	ei = PROC_I(inode);
	inode->i_mode = mode;
	inode->i_ino = get_next_ino();
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
	inode->i_op = &proc_def_inode_operations;

	/*
	 * grab the reference to task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_unlock;

	task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);
	security_task_to_inode(task, inode);

out:
	return inode;

out_unlock:
	iput(inode);
	return NULL;
}

/*
 * getattr for per-pid entries; hides the entry (-ENOENT) when hidepid
 * says the caller must not see the task, so stat() agrees with readdir().
 */
int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct task_struct *task;
	struct pid_namespace *pid = dentry->d_sb->s_fs_info;

	generic_fillattr(inode, stat);

	rcu_read_lock();
	stat->uid = GLOBAL_ROOT_UID;
	stat->gid = GLOBAL_ROOT_GID;
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
			rcu_read_unlock();
			/*
			 * This doesn't prevent learning whether PID exists,
			 * it only makes getattr() consistent with readdir().
			 */
			return -ENOENT;
		}
		task_dump_owner(task, inode->i_mode, &stat->uid, &stat->gid);
	}
	rcu_read_unlock();
	return 0;
}

/* dentry stuff */

/*
 * Exceptional case: normally we are not allowed to unhash a busy
 * directory. In this case, however, we can do it - no aliasing problems
 * due to the way we treat inodes.
 *
 * Rewrite the inode's ownerships here because the owning task may have
 * performed a setuid(), etc.
 *
 */
int pid_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	struct task_struct *task;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	task = get_proc_task(inode);

	if (task) {
		task_dump_owner(task, inode->i_mode, &inode->i_uid, &inode->i_gid);

		inode->i_mode &= ~(S_ISUID | S_ISGID);
		security_task_to_inode(task, inode);
		put_task_struct(task);
		return 1;
	}
	return 0;
}

/* True when the pid has no remaining PIDTYPE_PID task attached. */
static inline bool proc_inode_is_dead(struct inode *inode)
{
	return !proc_pid(inode)->tasks[PIDTYPE_PID].first;
}

int pid_delete_dentry(const struct dentry *dentry)
{
	/* Is the task we represent dead?
	 * If so, then don't put the dentry on the lru list,
	 * kill it immediately.
	 */
	return proc_inode_is_dead(d_inode(dentry));
}

const struct dentry_operations pid_dentry_operations =
{
	.d_revalidate	= pid_revalidate,
	.d_delete	= pid_delete_dentry,
};

/* Lookups */

/*
 * Fill a directory entry.
 *
 * If possible create the dcache entry and derive our inode number and
 * file type from dcache entry.
 *
 * Since all of the proc inode numbers are dynamically generated, the inode
 * numbers do not exist until the inode is cache.  This means creating the
 * the dcache entry in readdir is necessary to keep the inode numbers
 * reported by readdir in sync with the inode numbers reported
 * by stat.
 */
bool proc_fill_cache(struct file *file, struct dir_context *ctx,
	const char *name, int len,
	instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
	struct dentry *child, *dir = file->f_path.dentry;
	struct qstr qname = QSTR_INIT(name, len);
	struct inode *inode;
	unsigned type;
	ino_t ino;

	child = d_hash_and_lookup(dir, &qname);
	if (!child) {
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
		child = d_alloc_parallel(dir, &qname, &wq);
		if (IS_ERR(child))
			goto end_instantiate;
		if (d_in_lookup(child)) {
			int err = instantiate(d_inode(dir), child, task, ptr);
			d_lookup_done(child);
			if (err < 0) {
				dput(child);
				goto end_instantiate;
			}
		}
	}
	inode = d_inode(child);
	ino = inode->i_ino;
	type = inode->i_mode >> 12;
	dput(child);
	return dir_emit(ctx, name, len, ino, type);

end_instantiate:
	/* Fall back to a dummy inode number so readdir can still proceed. */
	return dir_emit(ctx, name, len, 1, DT_UNKNOWN);
}

/*
 * dname_to_vma_addr - maps a dentry name into two unsigned longs
 * which represent vma start and end addresses.
 */
static int dname_to_vma_addr(struct dentry *dentry,
			     unsigned long *start, unsigned long *end)
{
	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
		return -EINVAL;

	return 0;
}

/*
 * Revalidate a map_files dentry: it stays valid only while the exact
 * vma named by the dentry still exists in the (ptrace-accessible) mm.
 */
static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	bool exact_vma_exists = false;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	struct inode *inode;
	int status = 0;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	task = get_proc_task(inode);
	if (!task)
		goto out_notask;

	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm))
		goto out;

	if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
		down_read(&mm->mmap_sem);
		exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
		up_read(&mm->mmap_sem);
	}

	mmput(mm);

	if (exact_vma_exists) {
		task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid);

		security_task_to_inode(task, inode);
		status = 1;
	}

out:
	put_task_struct(task);

out_notask:
	return status;
}

static const struct dentry_operations tid_map_files_dentry_operations = {
	.d_revalidate	= map_files_d_revalidate,
	.d_delete	= pid_delete_dentry,
};

/*
 * Resolve a map_files symlink to the path of the file backing the
 * exactly-matching vma.  Takes mmap_sem for reading around the lookup.
 */
static int map_files_get_link(struct dentry *dentry, struct path *path)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	int rc;

	rc = -ENOENT;
	task = get_proc_task(d_inode(dentry));
	if (!task)
		goto out;

	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	rc = dname_to_vma_addr(dentry, &vm_start, &vm_end);
	if (rc)
		goto out_mmput;

	rc = -ENOENT;
	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (vma && vma->vm_file) {
		*path = vma->vm_file->f_path;
		path_get(path);
		rc = 0;
	}
	up_read(&mm->mmap_sem);

out_mmput:
	mmput(mm);
out:
	return rc;
}

struct map_files_info {
	fmode_t		mode;
	unsigned int	len;
	unsigned char	name[4*sizeof(long)+2];	/* max: %lx-%lx\0 */
};

/*
 * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
 * symlinks may be used to bypass permissions on ancestor directories in the
 * path to the file in question.
 */
static const char *
proc_map_files_get_link(struct dentry *dentry,
			struct inode *inode,
		        struct delayed_call *done)
{
	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	return proc_pid_get_link(dentry, inode, done);
}

/*
 * Identical to proc_pid_link_inode_operations except for get_link()
 */
static const struct inode_operations proc_map_files_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.get_link	= proc_map_files_get_link,
	.setattr	= proc_setattr,
};

static int
proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
			   struct task_struct *task, const void *ptr)
{
	fmode_t mode = (fmode_t)(unsigned long)ptr;
	struct proc_inode *ei;
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task, S_IFLNK |
				    ((mode & FMODE_READ ) ? S_IRUSR : 0) |
				    ((mode & FMODE_WRITE) ?
				     S_IWUSR : 0));
	if (!inode)
		return -ENOENT;

	ei = PROC_I(inode);
	ei->op.proc_get_link = map_files_get_link;

	inode->i_op = &proc_map_files_link_inode_operations;
	inode->i_size = 64;

	d_set_d_op(dentry, &tid_map_files_dentry_operations);
	d_add(dentry, inode);

	return 0;
}

/*
 * Look up a "start-end" named entry under /proc/pid/map_files and
 * instantiate a symlink for it when a file-backed vma matches exactly.
 * Requires ptrace read access to the target task.
 */
static struct dentry *proc_map_files_lookup(struct inode *dir,
		struct dentry *dentry, unsigned int flags)
{
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vma;
	struct task_struct *task;
	int result;
	struct mm_struct *mm;

	result = -ENOENT;
	task = get_proc_task(dir);
	if (!task)
		goto out;

	result = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	result = -ENOENT;
	if (dname_to_vma_addr(dentry, &vm_start, &vm_end))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;

	down_read(&mm->mmap_sem);
	vma = find_exact_vma(mm, vm_start, vm_end);
	if (!vma)
		goto out_no_vma;

	if (vma->vm_file)
		result = proc_map_files_instantiate(dir, dentry, task,
				(void *)(unsigned long)vma->vm_file->f_mode);

out_no_vma:
	up_read(&mm->mmap_sem);
	mmput(mm);
out_put_task:
	put_task_struct(task);
out:
	return ERR_PTR(result);
}

static const struct inode_operations proc_map_files_inode_operations = {
	.lookup		= proc_map_files_lookup,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};

static int
proc_map_files_readdir(struct file *file, struct dir_context *ctx)
{
	struct vm_area_struct *vma;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long nr_files, pos, i;
	struct flex_array *fa = NULL;
	struct map_files_info info;
	struct map_files_info *p;
	int ret;

	ret = -ENOENT;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		goto out_put_task;

	ret = 0;
	if (!dir_emit_dots(file, ctx))
		goto out_put_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_put_task;
	down_read(&mm->mmap_sem);

	nr_files = 0;

	/*
	 * We need two passes here:
	 *
	 *  1) Collect vmas of mapped files with mmap_sem taken
	 *  2) Release mmap_sem and instantiate entries
	 *
	 * otherwise we get lockdep complained, since filldir()
	 * routine might require mmap_sem taken in might_fault().
	 */

	for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
		if (vma->vm_file && ++pos > ctx->pos)
			nr_files++;
	}

	if (nr_files) {
		fa = flex_array_alloc(sizeof(info), nr_files,
					GFP_KERNEL);
		if (!fa || flex_array_prealloc(fa, 0, nr_files,
						GFP_KERNEL)) {
			ret = -ENOMEM;
			if (fa)
				flex_array_free(fa);
			up_read(&mm->mmap_sem);
			mmput(mm);
			goto out_put_task;
		}
		for (i = 0, vma = mm->mmap, pos = 2; vma;
				vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (++pos <= ctx->pos)
				continue;

			info.mode = vma->vm_file->f_mode;
			info.len = snprintf(info.name,
					sizeof(info.name), "%lx-%lx",
					vma->vm_start, vma->vm_end);
			if (flex_array_put(fa, i++, &info, GFP_KERNEL))
				BUG();
		}
	}
	up_read(&mm->mmap_sem);

	for (i = 0; i < nr_files; i++) {
		p = flex_array_get(fa, i);
		if (!proc_fill_cache(file, ctx,
				      p->name, p->len,
				      proc_map_files_instantiate,
				      task,
				      (void *)(unsigned long)p->mode))
			break;
		ctx->pos++;
	}
	if (fa)
		flex_array_free(fa);
	mmput(mm);

out_put_task:
	put_task_struct(task);
out:
	return ret;
}

static const struct file_operations proc_map_files_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_map_files_readdir,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS)
/* State carried between seq_file callbacks for /proc/pid/timers. */
struct timers_private {
	struct pid *pid;
	struct task_struct *task;
	struct sighand_struct *sighand;
	struct pid_namespace *ns;
	unsigned long flags;
};

/* Pin the task and its sighand lock for the duration of the walk. */
static void *timers_start(struct seq_file *m, loff_t *pos)
{
	struct timers_private *tp = m->private;

	tp->task = get_pid_task(tp->pid, PIDTYPE_PID);
	if (!tp->task)
		return ERR_PTR(-ESRCH);

	tp->sighand = lock_task_sighand(tp->task, &tp->flags);
	if (!tp->sighand)
		return ERR_PTR(-ESRCH);

	return seq_list_start(&tp->task->signal->posix_timers, *pos);
}

static void *timers_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct timers_private *tp = m->private;
	return seq_list_next(v, &tp->task->signal->posix_timers, pos);
}

/* Drop the sighand lock and the task reference taken in timers_start(). */
static void timers_stop(struct seq_file *m, void *v)
{
	struct timers_private *tp = m->private;

	if (tp->sighand) {
		unlock_task_sighand(tp->task, &tp->flags);
		tp->sighand = NULL;
	}

	if (tp->task) {
		put_task_struct(tp->task);
		tp->task = NULL;
	}
}

static int show_timer(struct seq_file *m, void *v)
{
	struct k_itimer *timer;
	struct timers_private *tp = m->private;
	int notify;
	static const char * const nstr[] = {
		[SIGEV_SIGNAL] = "signal",
		[SIGEV_NONE] = "none",
		[SIGEV_THREAD] = "thread",
	};

	timer = list_entry((struct list_head *)v, struct k_itimer, list);
	notify = timer->it_sigev_notify;

	seq_printf(m, "ID: %d\n", timer->it_id);
	seq_printf(m, "signal: %d/%p\n",
		   timer->sigq->info.si_signo,
		   timer->sigq->info.si_value.sival_ptr);
	seq_printf(m, "notify: %s/%s.%d\n",
		   nstr[notify & ~SIGEV_THREAD_ID],
		   (notify & SIGEV_THREAD_ID) ? "tid" : "pid",
		   pid_nr_ns(timer->it_pid, tp->ns));
	seq_printf(m, "ClockID: %d\n", timer->it_clock);

	return 0;
}

static const struct seq_operations proc_timers_seq_ops = {
	.start	= timers_start,
	.next	= timers_next,
	.stop	= timers_stop,
	.show	= show_timer,
};

static int proc_timers_open(struct inode *inode, struct file *file)
{
	struct timers_private *tp;

	tp = __seq_open_private(file, &proc_timers_seq_ops,
			sizeof(struct timers_private));
	if (!tp)
		return -ENOMEM;

	tp->pid = proc_pid(inode);
	tp->ns = inode->i_sb->s_fs_info;
	return 0;
}

static const struct file_operations proc_timers_operations = {
	.open		= proc_timers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif

/*
 * Set the task's timer slack in nanoseconds.  Writing 0 restores the
 * task's default slack.  Writes to another task require CAP_SYS_NICE
 * plus a security-module scheduler check.
 */
static ssize_t timerslack_ns_write(struct file *file, const char __user *buf,
					size_t count, loff_t *offset)
{
	struct inode *inode = file_inode(file);
	struct task_struct *p;
	u64 slack_ns;
	int err;

	err = kstrtoull_from_user(buf, count, 10, &slack_ns);
	if (err < 0)
		return err;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;

	if (p != current) {
		if (!capable(CAP_SYS_NICE)) {
			count = -EPERM;
			goto out;
		}

		err = security_task_setscheduler(p);
		if (err) {
			count = err;
			goto out;
		}
	}

	task_lock(p);
	if (slack_ns == 0)
		p->timer_slack_ns = p->default_timer_slack_ns;
	else
		p->timer_slack_ns = slack_ns;
	task_unlock(p);

out:
	put_task_struct(p);

	return count;
}

static int timerslack_ns_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;
	int err = 0;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	/* Reading another task's slack needs the same checks as writing. */
	if (p != current) {

		if (!capable(CAP_SYS_NICE)) {
			err = -EPERM;
			goto out;
		}
		err = security_task_getscheduler(p);
		if (err)
			goto out;
	}

	task_lock(p);
	seq_printf(m, "%llu\n", p->timer_slack_ns);
	task_unlock(p);

out:
	put_task_struct(p);

	return err;
}

static int timerslack_ns_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, timerslack_ns_show, inode);
}

static const struct file_operations proc_pid_set_timerslack_ns_operations = {
	.open		= timerslack_ns_open,
	.read		= seq_read,
	.write		= timerslack_ns_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Build the inode and dentry for one pid_entry table entry and attach
 * it to the dcache.  Returns 0 on success, -ENOENT if the task died.
 */
static int proc_pident_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;

	inode = proc_pid_make_inode(dir->i_sb, task, p->mode);
	if (!inode)
		goto out;

	ei = PROC_I(inode);
	if (S_ISDIR(inode->i_mode))
		set_nlink(inode, 2);	/* Use getattr to fix if necessary */
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	d_set_d_op(dentry, &pid_dentry_operations);
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}

static struct dentry *proc_pident_lookup(struct inode *dir,
					 struct dentry *dentry,
					 const struct pid_entry *ents,
					 unsigned int nents)
{
	int error;
	struct task_struct *task = get_proc_task(dir);
	const struct pid_entry *p, *last;

	error = -ENOENT;

	if (!task)
		goto out_no_task;

	/*
	 * Yes, it does not scale. And it should not. Don't add
	 * new entries into /proc/<tgid>/ without very good reasons.
	 */
	last = &ents[nents];
	for (p = ents; p < last; p++) {
		if (p->len != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, p->name, p->len))
			break;
	}
	if (p >= last)
		goto out;

	error = proc_pident_instantiate(dir, dentry, task, p);
out:
	put_task_struct(task);
out_no_task:
	return ERR_PTR(error);
}

/* Emit the entries of a static pid_entry table for readdir. */
static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
		const struct pid_entry *ents, unsigned int nents)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	const struct pid_entry *p;

	if (!task)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;

	if (ctx->pos >= nents + 2)
		goto out;

	for (p = ents + (ctx->pos - 2); p < ents + nents; p++) {
		if (!proc_fill_cache(file, ctx, p->name, p->len,
				proc_pident_instantiate, task, p))
			break;
		ctx->pos++;
	}
out:
	put_task_struct(task);
	return 0;
}

#ifdef CONFIG_SECURITY
/*
 * Read an LSM attribute (e.g. /proc/pid/attr/current); the attribute
 * name is taken from the dentry name and passed to the security module.
 */
static ssize_t proc_pid_attr_read(struct file * file, char __user * buf,
				  size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	char *p = NULL;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	if (!task)
		return -ESRCH;

	length = security_getprocattr(task,
				      (char*)file->f_path.dentry->d_name.name,
				      &p);
	put_task_struct(task);
	if (length > 0)
		length = simple_read_from_buffer(buf, count, ppos, p, length);
	kfree(p);
	return length;
}

static ssize_t proc_pid_attr_write(struct file * file, const char __user * buf,
				   size_t count, loff_t *ppos)
{
	struct inode * inode = file_inode(file);
	void *page;
	ssize_t length;
	struct task_struct *task = get_proc_task(inode);

	length =
		 -ESRCH;
	if (!task)
		goto out_no_task;

	/* A task may only write its own attributes. */
	length = -EACCES;
	if (current != task)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* No partial writes. */
	length = -EINVAL;
	if (*ppos != 0)
		goto out;

	page = memdup_user(buf, count);
	if (IS_ERR(page)) {
		length = PTR_ERR(page);
		goto out;
	}

	/* Guard against adverse ptrace interaction */
	length = mutex_lock_interruptible(&current->signal->cred_guard_mutex);
	if (length < 0)
		goto out_free;

	length = security_setprocattr(file->f_path.dentry->d_name.name,
				      page, count);
	mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
	kfree(page);
out:
	put_task_struct(task);
out_no_task:
	return length;
}

static const struct file_operations proc_pid_attr_operations = {
	.read		= proc_pid_attr_read,
	.write		= proc_pid_attr_write,
	.llseek		= generic_file_llseek,
};

static const struct pid_entry attr_dir_stuff[] = {
	REG("current",    S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("prev",       S_IRUGO,	   proc_pid_attr_operations),
	REG("exec",       S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("fscreate",   S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("keycreate",  S_IRUGO|S_IWUGO, proc_pid_attr_operations),
	REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations),
};

static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}

static const struct file_operations proc_attr_dir_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_attr_dir_readdir,
	.llseek		= generic_file_llseek,
};

static struct dentry *proc_attr_dir_lookup(struct inode *dir,
				struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
}

static const struct inode_operations proc_attr_dir_inode_operations = {
	.lookup		= proc_attr_dir_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

#endif

#ifdef CONFIG_ELF_CORE
/* Report the mm's coredump filter bits as a zero-padded hex word. */
static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
					 size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	char buffer[PROC_NUMBUF];
	size_t len;
	int ret;

	if (!task)
		return -ESRCH;

	ret = 0;
	mm = get_task_mm(task);
	if (mm) {
		len = snprintf(buffer, sizeof(buffer), "%08lx\n",
			       ((mm->flags & MMF_DUMP_FILTER_MASK) >>
				MMF_DUMP_FILTER_SHIFT));
		mmput(mm);
		ret = simple_read_from_buffer(buf, count, ppos, buffer, len);
	}

	put_task_struct(task);

	return ret;
}

/* Parse a bitmask and set/clear each MMF_DUMP_FILTER bit accordingly. */
static ssize_t proc_coredump_filter_write(struct file *file,
					  const char __user *buf,
					  size_t count,
					  loff_t *ppos)
{
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned int val;
	int ret;
	int i;
	unsigned long mask;

	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret < 0)
		return ret;

	ret = -ESRCH;
	task = get_proc_task(file_inode(file));
	if (!task)
		goto out_no_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_no_mm;
	ret = 0;

	for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) {
		if (val & mask)
			set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
		else
			clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags);
	}

	mmput(mm);
 out_no_mm:
	put_task_struct(task);
 out_no_task:
	if (ret < 0)
		return ret;
	return count;
}

static const struct file_operations proc_coredump_filter_operations = {
	.read		= proc_coredump_filter_read,
	.write		= proc_coredump_filter_write,
	.llseek		= generic_file_llseek,
};
#endif

#ifdef CONFIG_TASK_IO_ACCOUNTING
/*
 * Emit I/O accounting for a task (whole == 0) or aggregated over the
 * whole thread group (whole != 0).  Guarded by cred_guard_mutex and a
 * ptrace access check so unrelated tasks cannot read the counters.
 */
static int do_io_accounting(struct task_struct *task, struct seq_file *m, int whole)
{
	struct task_io_accounting acct = task->ioac;
	unsigned long flags;
	int result;

	result = mutex_lock_killable(&task->signal->cred_guard_mutex);
	if (result)
		return result;

	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
		result = -EACCES;
		goto out_unlock;
	}

	if (whole && lock_task_sighand(task, &flags)) {
		struct task_struct *t = task;

		task_io_accounting_add(&acct, &task->signal->ioac);
		while_each_thread(task, t)
			task_io_accounting_add(&acct, &t->ioac);

		unlock_task_sighand(task, &flags);
	}
	seq_printf(m,
		   "rchar: %llu\n"
		   "wchar: %llu\n"
		   "syscr: %llu\n"
		   "syscw: %llu\n"
		   "read_bytes: %llu\n"
		   "write_bytes: %llu\n"
		   "cancelled_write_bytes: %llu\n",
		   (unsigned long long)acct.rchar,
		   (unsigned long long)acct.wchar,
		   (unsigned long long)acct.syscr,
		   (unsigned long long)acct.syscw,
		   (unsigned long long)acct.read_bytes,
		   (unsigned long long)acct.write_bytes,
		   (unsigned long long)acct.cancelled_write_bytes);
	result = 0;

out_unlock:
	mutex_unlock(&task->signal->cred_guard_mutex);
	return result;
}

static int proc_tid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				  struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 0);
}

static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
				   struct pid *pid, struct task_struct *task)
{
	return do_io_accounting(task, m, 1);
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */

#ifdef CONFIG_USER_NS
static
int proc_id_map_open(struct inode *inode, struct file *file, 2694 const struct seq_operations *seq_ops) 2695 { 2696 struct user_namespace *ns = NULL; 2697 struct task_struct *task; 2698 struct seq_file *seq; 2699 int ret = -EINVAL; 2700 2701 task = get_proc_task(inode); 2702 if (task) { 2703 rcu_read_lock(); 2704 ns = get_user_ns(task_cred_xxx(task, user_ns)); 2705 rcu_read_unlock(); 2706 put_task_struct(task); 2707 } 2708 if (!ns) 2709 goto err; 2710 2711 ret = seq_open(file, seq_ops); 2712 if (ret) 2713 goto err_put_ns; 2714 2715 seq = file->private_data; 2716 seq->private = ns; 2717 2718 return 0; 2719 err_put_ns: 2720 put_user_ns(ns); 2721 err: 2722 return ret; 2723 } 2724 2725 static int proc_id_map_release(struct inode *inode, struct file *file) 2726 { 2727 struct seq_file *seq = file->private_data; 2728 struct user_namespace *ns = seq->private; 2729 put_user_ns(ns); 2730 return seq_release(inode, file); 2731 } 2732 2733 static int proc_uid_map_open(struct inode *inode, struct file *file) 2734 { 2735 return proc_id_map_open(inode, file, &proc_uid_seq_operations); 2736 } 2737 2738 static int proc_gid_map_open(struct inode *inode, struct file *file) 2739 { 2740 return proc_id_map_open(inode, file, &proc_gid_seq_operations); 2741 } 2742 2743 static int proc_projid_map_open(struct inode *inode, struct file *file) 2744 { 2745 return proc_id_map_open(inode, file, &proc_projid_seq_operations); 2746 } 2747 2748 static const struct file_operations proc_uid_map_operations = { 2749 .open = proc_uid_map_open, 2750 .write = proc_uid_map_write, 2751 .read = seq_read, 2752 .llseek = seq_lseek, 2753 .release = proc_id_map_release, 2754 }; 2755 2756 static const struct file_operations proc_gid_map_operations = { 2757 .open = proc_gid_map_open, 2758 .write = proc_gid_map_write, 2759 .read = seq_read, 2760 .llseek = seq_lseek, 2761 .release = proc_id_map_release, 2762 }; 2763 2764 static const struct file_operations proc_projid_map_operations = { 2765 .open = 
proc_projid_map_open, 2766 .write = proc_projid_map_write, 2767 .read = seq_read, 2768 .llseek = seq_lseek, 2769 .release = proc_id_map_release, 2770 }; 2771 2772 static int proc_setgroups_open(struct inode *inode, struct file *file) 2773 { 2774 struct user_namespace *ns = NULL; 2775 struct task_struct *task; 2776 int ret; 2777 2778 ret = -ESRCH; 2779 task = get_proc_task(inode); 2780 if (task) { 2781 rcu_read_lock(); 2782 ns = get_user_ns(task_cred_xxx(task, user_ns)); 2783 rcu_read_unlock(); 2784 put_task_struct(task); 2785 } 2786 if (!ns) 2787 goto err; 2788 2789 if (file->f_mode & FMODE_WRITE) { 2790 ret = -EACCES; 2791 if (!ns_capable(ns, CAP_SYS_ADMIN)) 2792 goto err_put_ns; 2793 } 2794 2795 ret = single_open(file, &proc_setgroups_show, ns); 2796 if (ret) 2797 goto err_put_ns; 2798 2799 return 0; 2800 err_put_ns: 2801 put_user_ns(ns); 2802 err: 2803 return ret; 2804 } 2805 2806 static int proc_setgroups_release(struct inode *inode, struct file *file) 2807 { 2808 struct seq_file *seq = file->private_data; 2809 struct user_namespace *ns = seq->private; 2810 int ret = single_release(inode, file); 2811 put_user_ns(ns); 2812 return ret; 2813 } 2814 2815 static const struct file_operations proc_setgroups_operations = { 2816 .open = proc_setgroups_open, 2817 .write = proc_setgroups_write, 2818 .read = seq_read, 2819 .llseek = seq_lseek, 2820 .release = proc_setgroups_release, 2821 }; 2822 #endif /* CONFIG_USER_NS */ 2823 2824 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns, 2825 struct pid *pid, struct task_struct *task) 2826 { 2827 int err = lock_trace(task); 2828 if (!err) { 2829 seq_printf(m, "%08x\n", task->personality); 2830 unlock_trace(task); 2831 } 2832 return err; 2833 } 2834 2835 /* 2836 * Thread groups 2837 */ 2838 static const struct file_operations proc_task_operations; 2839 static const struct inode_operations proc_task_inode_operations; 2840 2841 static const struct pid_entry tgid_base_stuff[] = { 2842 DIR("task", 
S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations), 2843 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 2844 DIR("map_files", S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations), 2845 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations), 2846 DIR("ns", S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations), 2847 #ifdef CONFIG_NET 2848 DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations), 2849 #endif 2850 REG("environ", S_IRUSR, proc_environ_operations), 2851 REG("auxv", S_IRUSR, proc_auxv_operations), 2852 ONE("status", S_IRUGO, proc_pid_status), 2853 ONE("personality", S_IRUSR, proc_pid_personality), 2854 ONE("limits", S_IRUGO, proc_pid_limits), 2855 #ifdef CONFIG_SCHED_DEBUG 2856 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), 2857 #endif 2858 #ifdef CONFIG_SCHED_AUTOGROUP 2859 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations), 2860 #endif 2861 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), 2862 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK 2863 ONE("syscall", S_IRUSR, proc_pid_syscall), 2864 #endif 2865 REG("cmdline", S_IRUGO, proc_pid_cmdline_ops), 2866 ONE("stat", S_IRUGO, proc_tgid_stat), 2867 ONE("statm", S_IRUGO, proc_pid_statm), 2868 REG("maps", S_IRUGO, proc_pid_maps_operations), 2869 #ifdef CONFIG_NUMA 2870 REG("numa_maps", S_IRUGO, proc_pid_numa_maps_operations), 2871 #endif 2872 REG("mem", S_IRUSR|S_IWUSR, proc_mem_operations), 2873 LNK("cwd", proc_cwd_link), 2874 LNK("root", proc_root_link), 2875 LNK("exe", proc_exe_link), 2876 REG("mounts", S_IRUGO, proc_mounts_operations), 2877 REG("mountinfo", S_IRUGO, proc_mountinfo_operations), 2878 REG("mountstats", S_IRUSR, proc_mountstats_operations), 2879 #ifdef CONFIG_PROC_PAGE_MONITOR 2880 REG("clear_refs", S_IWUSR, proc_clear_refs_operations), 2881 REG("smaps", S_IRUGO, proc_pid_smaps_operations), 2882 REG("pagemap", S_IRUSR, 
proc_pagemap_operations), 2883 #endif 2884 #ifdef CONFIG_SECURITY 2885 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations), 2886 #endif 2887 #ifdef CONFIG_KALLSYMS 2888 ONE("wchan", S_IRUGO, proc_pid_wchan), 2889 #endif 2890 #ifdef CONFIG_STACKTRACE 2891 ONE("stack", S_IRUSR, proc_pid_stack), 2892 #endif 2893 #ifdef CONFIG_SCHED_INFO 2894 ONE("schedstat", S_IRUGO, proc_pid_schedstat), 2895 #endif 2896 #ifdef CONFIG_LATENCYTOP 2897 REG("latency", S_IRUGO, proc_lstats_operations), 2898 #endif 2899 #ifdef CONFIG_PROC_PID_CPUSET 2900 ONE("cpuset", S_IRUGO, proc_cpuset_show), 2901 #endif 2902 #ifdef CONFIG_CGROUPS 2903 ONE("cgroup", S_IRUGO, proc_cgroup_show), 2904 #endif 2905 ONE("oom_score", S_IRUGO, proc_oom_score), 2906 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), 2907 REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), 2908 #ifdef CONFIG_AUDITSYSCALL 2909 REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), 2910 REG("sessionid", S_IRUGO, proc_sessionid_operations), 2911 #endif 2912 #ifdef CONFIG_FAULT_INJECTION 2913 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 2914 #endif 2915 #ifdef CONFIG_ELF_CORE 2916 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), 2917 #endif 2918 #ifdef CONFIG_TASK_IO_ACCOUNTING 2919 ONE("io", S_IRUSR, proc_tgid_io_accounting), 2920 #endif 2921 #ifdef CONFIG_HARDWALL 2922 ONE("hardwall", S_IRUGO, proc_pid_hardwall), 2923 #endif 2924 #ifdef CONFIG_USER_NS 2925 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations), 2926 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations), 2927 REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), 2928 REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), 2929 #endif 2930 #if defined(CONFIG_CHECKPOINT_RESTORE) && defined(CONFIG_POSIX_TIMERS) 2931 REG("timers", S_IRUGO, proc_timers_operations), 2932 #endif 2933 REG("timerslack_ns", S_IRUGO|S_IWUGO, 
proc_pid_set_timerslack_ns_operations), 2934 }; 2935 2936 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) 2937 { 2938 return proc_pident_readdir(file, ctx, 2939 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); 2940 } 2941 2942 static const struct file_operations proc_tgid_base_operations = { 2943 .read = generic_read_dir, 2944 .iterate_shared = proc_tgid_base_readdir, 2945 .llseek = generic_file_llseek, 2946 }; 2947 2948 static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 2949 { 2950 return proc_pident_lookup(dir, dentry, 2951 tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff)); 2952 } 2953 2954 static const struct inode_operations proc_tgid_base_inode_operations = { 2955 .lookup = proc_tgid_base_lookup, 2956 .getattr = pid_getattr, 2957 .setattr = proc_setattr, 2958 .permission = proc_pid_permission, 2959 }; 2960 2961 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) 2962 { 2963 struct dentry *dentry, *leader, *dir; 2964 char buf[PROC_NUMBUF]; 2965 struct qstr name; 2966 2967 name.name = buf; 2968 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2969 /* no ->d_hash() rejects on procfs */ 2970 dentry = d_hash_and_lookup(mnt->mnt_root, &name); 2971 if (dentry) { 2972 d_invalidate(dentry); 2973 dput(dentry); 2974 } 2975 2976 if (pid == tgid) 2977 return; 2978 2979 name.name = buf; 2980 name.len = snprintf(buf, sizeof(buf), "%d", tgid); 2981 leader = d_hash_and_lookup(mnt->mnt_root, &name); 2982 if (!leader) 2983 goto out; 2984 2985 name.name = "task"; 2986 name.len = strlen(name.name); 2987 dir = d_hash_and_lookup(leader, &name); 2988 if (!dir) 2989 goto out_put_leader; 2990 2991 name.name = buf; 2992 name.len = snprintf(buf, sizeof(buf), "%d", pid); 2993 dentry = d_hash_and_lookup(dir, &name); 2994 if (dentry) { 2995 d_invalidate(dentry); 2996 dput(dentry); 2997 } 2998 2999 dput(dir); 3000 out_put_leader: 3001 dput(leader); 3002 out: 3003 return; 3004 } 3005 3006 
/**
 * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
 * @task: task that should be flushed.
 *
 * When flushing dentries from proc, one needs to flush them from global
 * proc (proc_mnt) and from all the namespaces' procs this task was seen
 * in. This call is supposed to do all of this job.
 *
 * Looks in the dcache for
 * /proc/@pid
 * /proc/@tgid/task/@pid
 * if either directory is present flushes it and all of its children
 * from the dcache.
 *
 * It is safe and reasonable to cache /proc entries for a task until
 * that task exits.  After that they just clog up the dcache with
 * useless entries, possibly causing useful dcache entries to be
 * flushed instead.  This routine is provided to flush those useless
 * dcache entries at process exit time.
 *
 * NOTE: This routine is just an optimization so it does not guarantee
 *       that no dcache entries will exist at process exit time it
 *       just makes it very unlikely that any will persist.
 */

void proc_flush_task(struct task_struct *task)
{
	int i;
	struct pid *pid, *tgid;
	struct upid *upid;

	pid = task_pid(task);
	tgid = task_tgid(task);

	/* One flush per pid namespace level this task is visible in. */
	for (i = 0; i <= pid->level; i++) {
		upid = &pid->numbers[i];
		proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
				    tgid->numbers[i].nr);
	}
}

/*
 * Build the inode + dentry for a /proc/<pid> directory during lookup
 * or readdir.  Returns 0 on success, -ENOENT if the inode cannot be
 * allocated or the task died before the dentry could be validated.
 */
static int proc_pid_instantiate(struct inode *dir,
				   struct dentry * dentry,
				   struct task_struct *task, const void *ptr)
{
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto out;

	inode->i_op = &proc_tgid_base_inode_operations;
	inode->i_fop = &proc_tgid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	set_nlink(inode, nlink_tgid);

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}

/* Lookup of a numeric name in /proc: resolve it to a tgid directory. */
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	int result = -ENOENT;
	struct task_struct *task;
	unsigned tgid;
	struct pid_namespace *ns;

	tgid = name_to_int(&dentry->d_name);
	if (tgid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tgid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;

	result = proc_pid_instantiate(dir, dentry, task, NULL);
	put_task_struct(task);
out:
	return ERR_PTR(result);
}

/*
 * Find the first task with tgid >= tgid
 *
 */
struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	/* Drop the reference held from the previous iteration, if any. */
	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is if the pid we have found is the
		 * pid of a thread_group_leader.  Testing for task
		 * being a thread_group_leader is the obvious thing
		 * todo but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straight forward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * As we don't care in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}

/* Directory positions 0..TGID_OFFSET-1 are ".", "..", "self", "thread-self". */
#define TGID_OFFSET (FIRST_PROCESS_ENTRY + 2)

/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file *file, struct dir_context *ctx)
{
	struct tgid_iter iter;
	struct pid_namespace *ns = file_inode(file)->i_sb->s_fs_info;
	loff_t pos = ctx->pos;

	if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
		return 0;

	if (pos == TGID_OFFSET - 2) {
		struct inode *inode = d_inode(ns->proc_self);
		if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	if (pos == TGID_OFFSET - 1) {
		struct inode *inode = d_inode(ns->proc_thread_self);
		if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
			return 0;
		ctx->pos = pos = pos + 1;
	}
	iter.tgid = pos - TGID_OFFSET;
	iter.task = NULL;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		char name[PROC_NUMBUF];
		int len;

		cond_resched();
		/* Honor hidepid mount option: skip tasks the caller may not see. */
		if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
			continue;

		len = snprintf(name, sizeof(name), "%d", iter.tgid);
		ctx->pos = iter.tgid + TGID_OFFSET;
		if (!proc_fill_cache(file, ctx, name, len,
				     proc_pid_instantiate, iter.task, NULL)) {
			/* Buffer full: release the iterator's reference and stop. */
			put_task_struct(iter.task);
			return 0;
		}
	}
	ctx->pos = PID_MAX_LIMIT + TGID_OFFSET;
	return 0;
}

/*
 * proc_tid_comm_permission is a special permission function exclusively
 * used for the node /proc/<pid>/task/<tid>/comm.
 * It bypasses generic permission checks in the case where a task of the same
 * task group attempts to access the node.
 * The rationale behind this is that glibc and bionic access this node for
 * cross thread naming (pthread_set/getname_np(!self)). However, if
 * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
 * which locks out the cross thread naming implementation.
 * This function makes sure that the node is always accessible for members of
 * same thread group.
 */
static int proc_tid_comm_permission(struct inode *inode, int mask)
{
	bool is_same_tgroup;
	struct task_struct *task;

	task = get_proc_task(inode);
	if (!task)
		return -ESRCH;
	is_same_tgroup = same_thread_group(current, task);
	put_task_struct(task);

	if (likely(is_same_tgroup && !(mask & MAY_EXEC))) {
		/* This file (/proc/<pid>/task/<tid>/comm) can always be
		 * read or written by the members of the corresponding
		 * thread group.
		 */
		return 0;
	}

	return generic_permission(inode, mask);
}

static const struct inode_operations proc_tid_comm_inode_operations = {
		.permission = proc_tid_comm_permission,
};

/*
 * Tasks
 */
/* Entries of a /proc/<pid>/task/<tid> directory (per-thread view). */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
	DIR("ns",	 S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",   S_IRUSR, proc_environ_operations),
	REG("auxv",      S_IRUSR, proc_auxv_operations),
	ONE("status",    S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	ONE("limits",	 S_IRUGO, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	NOD("comm",      S_IFREG|S_IRUGO|S_IWUSR,
			 &proc_tid_comm_inode_operations,
			 &proc_pid_set_comm_operations, {}),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	ONE("syscall",   S_IRUSR, proc_pid_syscall),
#endif
	REG("cmdline",   S_IRUGO, proc_pid_cmdline_ops),
	ONE("stat",      S_IRUGO, proc_tid_stat),
	ONE("statm",     S_IRUGO, proc_pid_statm),
	REG("maps",      S_IRUGO, proc_tid_maps_operations),
#ifdef CONFIG_PROC_CHILDREN
	REG("children",  S_IRUGO, proc_tid_children_operations),
#endif
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",       proc_cwd_link),
	LNK("root",      proc_root_link),
	LNK("exe",       proc_exe_link),
	REG("mounts",    S_IRUGO, proc_mounts_operations),
	REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",     S_IRUGO, proc_tid_smaps_operations),
	REG("pagemap",   S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	ONE("wchan",     S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHED_INFO
	ONE("schedstat", S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",  S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	ONE("cpuset",    S_IRUGO, proc_cpuset_show),
#endif
#ifdef CONFIG_CGROUPS
	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
#endif
	ONE("oom_score", S_IRUGO, proc_oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	ONE("io",	S_IRUSR, proc_tid_io_accounting),
#endif
#ifdef CONFIG_HARDWALL
	ONE("hardwall",   S_IRUGO, proc_pid_hardwall),
#endif
#ifdef CONFIG_USER_NS
	REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
	REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
	REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
	REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
};

/* readdir for /proc/<pid>/task/<tid>: enumerate the tid_base_stuff table. */
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
{
	return proc_pident_readdir(file, ctx,
				   tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

/* lookup for /proc/<pid>/task/<tid>: resolve a name against tid_base_stuff. */
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_tid_base_readdir,
	.llseek		= generic_file_llseek,
};

static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

/*
 * Build the inode + dentry for a /proc/<pid>/task/<tid> directory.
 * Returns 0 on success, -ENOENT on allocation failure or if the task
 * died before the dentry could be validated.
 */
static int proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct inode *inode;
	inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);

	if (!inode)
		goto out;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	set_nlink(inode, nlink_tid);

	d_set_d_op(dentry, &pid_dentry_operations);

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, 0))
		return 0;
out:
	return -ENOENT;
}

/*
 * Lookup of a numeric name in /proc/<pid>/task: the tid must belong to
 * the same thread group as the directory's leader task.
 */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
	int result = -ENOENT;
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(&dentry->d_name);
	if (tid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return ERR_PTR(result);
}

/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the users
 * buffer was too small or there was a seek into the middle of the
 * directory we have more work todo.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 */
static struct task_struct *first_tid(struct pid *pid, int tid, loff_t f_pos,
					struct pid_namespace *ns)
{
	struct task_struct *pos, *task;
	unsigned long nr = f_pos;

	if (nr != f_pos)	/* 32bit overflow? */
		return NULL;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	if (!task)
		goto fail;

	/* Attempt to start with the tid of a thread */
	if (tid && nr) {
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && same_thread_group(pos, task))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing todo */
	if (nr >= get_nr_threads(task))
		goto fail;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	pos = task = task->group_leader;
	do {
		if (!nr--)
			goto found;
	} while_each_thread(task, pos);
fail:
	pos = NULL;
	goto out;
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}

/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;
	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		/* Wrapping around to the leader means the list is exhausted. */
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}

/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct task_struct *task;
	struct pid_namespace *ns;
	int tid;

	if (proc_inode_is_dead(inode))
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		return 0;

	/* f_version caches the tgid value that the last readdir call couldn't
	 * return. lseek aka telldir automagically resets f_version to 0.
	 */
	ns = inode->i_sb->s_fs_info;
	tid = (int)file->f_version;
	file->f_version = 0;
	for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
	     task;
	     task = next_tid(task), ctx->pos++) {
		char name[PROC_NUMBUF];
		int len;
		tid = task_pid_nr_ns(task, ns);
		len = snprintf(name, sizeof(name), "%d", tid);
		if (!proc_fill_cache(file, ctx, name, len,
				proc_task_instantiate, task, NULL)) {
			/* returning this tgid failed, save it as the first
			 * pid for the next readdir call */
			file->f_version = (u64)tid;
			put_task_struct(task);
			break;
		}
	}

	return 0;
}

/*
 * stat for a /proc/<pid>/task directory: nlink is the usual directory
 * links plus one per live thread.
 * NOTE(review): old-style getattr signature (pre-4.11 path-based API) -
 * consistent with the rest of this file.
 */
static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct task_struct *p = get_proc_task(inode);
	generic_fillattr(inode, stat);

	if (p) {
		stat->nlink += get_nr_threads(p);
		put_task_struct(p);
	}

	return 0;
}

static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
	.permission	= proc_pid_permission,
};

static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.iterate_shared	= proc_task_readdir,
	.llseek		= generic_file_llseek,
};

/* Compute the static nlink counts for tid/tgid proc directories at boot. */
void __init set_proc_pid_nlink(void)
{
	nlink_tid = pid_entry_nlink(tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
	nlink_tgid = pid_entry_nlink(tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}