1 #include "cgroup-internal.h" 2 3 #include <linux/ctype.h> 4 #include <linux/kmod.h> 5 #include <linux/sort.h> 6 #include <linux/delay.h> 7 #include <linux/mm.h> 8 #include <linux/sched/signal.h> 9 #include <linux/sched/task.h> 10 #include <linux/magic.h> 11 #include <linux/slab.h> 12 #include <linux/vmalloc.h> 13 #include <linux/delayacct.h> 14 #include <linux/pid_namespace.h> 15 #include <linux/cgroupstats.h> 16 17 #include <trace/events/cgroup.h> 18 19 /* 20 * pidlists linger the following amount before being destroyed. The goal 21 * is avoiding frequent destruction in the middle of consecutive read calls 22 * Expiring in the middle is a performance problem not a correctness one. 23 * 1 sec should be enough. 24 */ 25 #define CGROUP_PIDLIST_DESTROY_DELAY HZ 26 27 /* Controllers blocked by the commandline in v1 */ 28 static u16 cgroup_no_v1_mask; 29 30 /* disable named v1 mounts */ 31 static bool cgroup_no_v1_named; 32 33 /* 34 * pidlist destructions need to be flushed on cgroup destruction. Use a 35 * separate workqueue as flush domain. 36 */ 37 static struct workqueue_struct *cgroup_pidlist_destroy_wq; 38 39 /* 40 * Protects cgroup_subsys->release_agent_path. Modifying it also requires 41 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. 42 */ 43 static DEFINE_SPINLOCK(release_agent_path_lock); 44 45 bool cgroup1_ssid_disabled(int ssid) 46 { 47 return cgroup_no_v1_mask & (1 << ssid); 48 } 49 50 /** 51 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' 52 * @from: attach to all cgroups of a given task 53 * @tsk: the task to be attached 54 */ 55 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) 56 { 57 struct cgroup_root *root; 58 int retval = 0; 59 60 mutex_lock(&cgroup_mutex); 61 percpu_down_write(&cgroup_threadgroup_rwsem); 62 for_each_root(root) { 63 struct cgroup *from_cgrp; 64 65 if (root == &cgrp_dfl_root) 66 continue; 67 68 spin_lock_irq(&css_set_lock); 69 from_cgrp = task_cgroup_from_root(from, root); 70 spin_unlock_irq(&css_set_lock); 71 72 retval = cgroup_attach_task(from_cgrp, tsk, false); 73 if (retval) 74 break; 75 } 76 percpu_up_write(&cgroup_threadgroup_rwsem); 77 mutex_unlock(&cgroup_mutex); 78 79 return retval; 80 } 81 EXPORT_SYMBOL_GPL(cgroup_attach_task_all); 82 83 /** 84 * cgroup_trasnsfer_tasks - move tasks from one cgroup to another 85 * @to: cgroup to which the tasks will be moved 86 * @from: cgroup in which the tasks currently reside 87 * 88 * Locking rules between cgroup_post_fork() and the migration path 89 * guarantee that, if a task is forking while being migrated, the new child 90 * is guaranteed to be either visible in the source cgroup after the 91 * parent's migration is complete or put into the target cgroup. No task 92 * can slip out of migration through forking. 
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
        DEFINE_CGROUP_MGCTX(mgctx);
        struct cgrp_cset_link *link;
        struct css_task_iter it;
        struct task_struct *task;
        int ret;

        if (cgroup_on_dfl(to))
                return -EINVAL;

        ret = cgroup_migrate_vet_dst(to);
        if (ret)
                return ret;

        mutex_lock(&cgroup_mutex);

        percpu_down_write(&cgroup_threadgroup_rwsem);

        /* all tasks in @from are being moved, all csets are source */
        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &mgctx);
        spin_unlock_irq(&css_set_lock);

        ret = cgroup_migrate_prepare_dst(&mgctx);
        if (ret)
                goto out_err;

        /*
         * Migrate tasks one-by-one until @from is empty. This fails iff
         * ->can_attach() fails.
         */
        do {
                css_task_iter_start(&from->self, 0, &it);

                do {
                        task = css_task_iter_next(&it);
                } while (task && (task->flags & PF_EXITING));

                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);

                if (task) {
                        ret = cgroup_migrate(task, false, &mgctx);
                        if (!ret)
                                TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
                        put_task_struct(task);
                }
        } while (task && !ret);
out_err:
        cgroup_migrate_finish(&mgctx);
        percpu_up_write(&cgroup_threadgroup_rwsem);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
        CGROUP_FILE_PROCS,
        CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
        /*
         * used to find which pidlist is wanted. doesn't change as long as
         * this particular list stays in the list.
         */
        struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
        /* array of xids */
        pid_t *list;
        /* how many elements the above list has */
        int length;
        /* each of these stored in a list by its cgroup */
        struct list_head links;
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
        /* for delayed destruction */
        struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
        if (PIDLIST_TOO_LARGE(count))
                return vmalloc(array_size(count, sizeof(pid_t)));
        else
                return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
        kvfree(p);
}

/*
 * Used to destroy all pidlists still lingering on the destroy timer.
 * None should be left afterwards.
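 * Each pending destroy_dwork is expedited (its delay forced to zero) and
 * cgroup_pidlist_destroy_wq is flushed, so every pidlist is gone by the
 * time this returns.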
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
        struct cgroup_pidlist *l, *tmp_l;

        mutex_lock(&cgrp->pidlist_mutex);
        list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
        mutex_unlock(&cgrp->pidlist_mutex);

        flush_workqueue(cgroup_pidlist_destroy_wq);
        BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
                                                destroy_dwork);
        struct cgroup_pidlist *tofree = NULL;

        mutex_lock(&l->owner->pidlist_mutex);

        /*
         * Destroy iff we didn't get queued again. The state won't change
         * as destroy_dwork can only be queued while locked.
         */
        if (!delayed_work_pending(dwork)) {
                list_del(&l->links);
                pidlist_free(l->list);
                put_pid_ns(l->key.ns);
                tofree = l;
        }

        mutex_unlock(&l->owner->pidlist_mutex);
        kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
        int src, dest = 1;

        /*
         * we presume the 0th element is unique, so src starts at 1. trivial
         * edge cases first; no work needs to be done for either
         */
        if (length == 0 || length == 1)
                return length;
        /* src and dest walk down the list; dest counts unique elements */
        for (src = 1; src < length; src++) {
                /* find next unique element */
                while (list[src] == list[src-1]) {
                        src++;
                        if (src == length)
                                goto after;
                }
                /* dest always points to where the next unique element goes */
                list[dest] = list[src];
                dest++;
        }
after:
        return dest;
}

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco. As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer. As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
        return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
                                                  enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;
        /* don't need task_nsproxy() if we're looking at ourself */
        struct pid_namespace *ns = task_active_pid_ns(current);

        lockdep_assert_held(&cgrp->pidlist_mutex);

        list_for_each_entry(l, &cgrp->pidlists, links)
                if (l->key.type == type && l->key.ns == ns)
                        return l;
        return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary. Must be called with
 * cgrp->pidlist_mutex held; returns NULL if we're out of memory.
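 * A newly created pidlist pins the current task's pid namespace via
 * get_pid_ns(); the reference is dropped in
 * cgroup_pidlist_destroy_work_fn().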
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
                                                         enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        l = cgroup_pidlist_find(cgrp, type);
        if (l)
                return l;

        /* entry not found; create a new one */
        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
        if (!l)
                return l;

        INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
        l->key.type = type;
        /* don't need task_nsproxy() if we're looking at ourself */
        l->key.ns = get_pid_ns(task_active_pid_ns(current));
        l->owner = cgrp;
        list_add(&l->links, &cgrp->pidlists);
        return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
        int count = 0;
        struct cgrp_cset_link *link;

        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += link->cset->nr_tasks;
        spin_unlock_irq(&css_set_lock);
        return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                              struct cgroup_pidlist **lp)
{
        pid_t *array;
        int length;
        int pid, n = 0; /* used for populating the array */
        struct css_task_iter it;
        struct task_struct *tsk;
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        /*
         * If cgroup gets more users after we read count, we won't have
         * enough space - tough. This race is indistinguishable to the
         * caller from the case that the additional cgroup users didn't
         * show up until sometime later on.
         */
        length = cgroup_task_count(cgrp);
        array = pidlist_allocate(length);
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                if (unlikely(n == length))
                        break;
                /* get tgid or pid for procs or tasks file respectively */
                if (type == CGROUP_FILE_PROCS)
                        pid = task_tgid_vnr(tsk);
                else
                        pid = task_pid_vnr(tsk);
                if (pid > 0) /* make sure to only use valid results */
                        array[n++] = pid;
        }
        css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
        if (type == CGROUP_FILE_PROCS)
                length = pidlist_uniq(array, length);

        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
                pidlist_free(array);
                return -ENOMEM;
        }

        /* store array, freeing old if necessary */
        pidlist_free(l->list);
        l->list = array;
        l->length = length;
        *lp = l;
        return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
        /*
         * Initially we receive a position value that corresponds to
         * one more than the last pid shown (or 0 on the first call or
         * after a seek to the start).
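         * That pid may no longer be in the list (tasks can exit between
         * read() calls), so an exact match cannot be relied on.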
         * Use a binary-search to find the next pid to display, if any.
         */
        struct kernfs_open_file *of = s->private;
        struct cgroup *cgrp = seq_css(s)->cgroup;
        struct cgroup_pidlist *l;
        enum cgroup_filetype type = seq_cft(s)->private;
        int index = 0, pid = *pos;
        int *iter, ret;

        mutex_lock(&cgrp->pidlist_mutex);

        /*
         * !NULL @of->priv indicates that this isn't the first start()
         * after open. If the matching pidlist is around, we can use that.
         * Look for it. Note that @of->priv can't be used directly. It
         * could already have been destroyed.
         */
        if (of->priv)
                of->priv = cgroup_pidlist_find(cgrp, type);

        /*
         * Either this is the first start() after open or the matching
         * pidlist has been destroyed in between. Create a new one.
         */
        if (!of->priv) {
                ret = pidlist_array_load(cgrp, type,
                                         (struct cgroup_pidlist **)&of->priv);
                if (ret)
                        return ERR_PTR(ret);
        }
        l = of->priv;

        if (pid) {
                int end = l->length;

                while (index < end) {
                        int mid = (index + end) / 2;

                        if (l->list[mid] == pid) {
                                index = mid;
                                break;
                        } else if (l->list[mid] <= pid)
                                index = mid + 1;
                        else
                                end = mid;
                }
        }
        /* If we're off the end of the array, we're done */
        if (index >= l->length)
                return NULL;
        /* Update the abstract position to be the actual pid that we found */
        iter = l->list + index;
        *pos = *iter;
        return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;

        if (l)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
                                 CGROUP_PIDLIST_DESTROY_DELAY);
        mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;
        pid_t *p = v;
        pid_t *end = l->list + l->length;

        /*
         * Advance to the next pid in the array. If this goes off the
         * end, we're done
         */
        p++;
        if (p >= end) {
                return NULL;
        } else {
                *pos = *p;
                return p;
        }
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
        seq_printf(s, "%d\n", *(int *)v);

        return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
                                     char *buf, size_t nbytes, loff_t off,
                                     bool threadgroup)
{
        struct cgroup *cgrp;
        struct task_struct *task;
        const struct cred *cred, *tcred;
        ssize_t ret;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;

        task = cgroup_procs_write_start(buf, threadgroup);
        ret = PTR_ERR_OR_ZERO(task);
        if (ret)
                goto out_unlock;

        /*
         * Even if we're attaching all tasks in the thread group, we only
         * need to check permissions on one of them.
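         * The writer's euid must be root or match the target task's uid
         * or saved set-uid, as checked below.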
         */
        cred = current_cred();
        tcred = get_task_cred(task);
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->euid, tcred->suid))
                ret = -EACCES;
        put_cred(tcred);
        if (ret)
                goto out_finish;

        ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
        cgroup_procs_write_finish(task);
out_unlock:
        cgroup_kn_unlock(of->kn);

        return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
                                          char *buf, size_t nbytes, loff_t off)
{
        struct cgroup *cgrp;

        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
        strlcpy(cgrp->root->release_agent_path, strstrip(buf),
                sizeof(cgrp->root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        cgroup_kn_unlock(of->kn);
        return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;

        spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
        spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
        return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "0\n");
        return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
                                          struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
                clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
                                      struct cftype *cft)
{
        return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
                                       struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
                clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
        {
                .name = "cgroup.procs",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup1_procs_write,
        },
        {
                .name = "cgroup.clone_children",
                .read_u64 = cgroup_clone_children_read,
                .write_u64 = cgroup_clone_children_write,
        },
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_sane_behavior_show,
        },
        {
                .name = "tasks",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write = cgroup1_tasks_write,
        },
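        /*
         * "tasks" above lists the pid of every individual thread, while
         * "cgroup.procs" lists only thread-group leader tgids,
         * deduplicated by pidlist_uniq().
         */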
        {
                .name = "notify_on_release",
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = "release_agent",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
                .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
        struct cgroup_subsys *ss;
        int i;

        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
        /*
         * Ideally we don't want subsystems moving around while we do this.
         * cgroup_mutex is also necessary to guarantee an atomic snapshot of
         * subsys/hierarchy state.
         */
        mutex_lock(&cgroup_mutex);

        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->legacy_name, ss->root->hierarchy_id,
                           atomic_read(&ss->root->nr_cgrps),
                           cgroup_ssid_enabled(i));

        mutex_unlock(&cgroup_mutex);
        return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup *cgrp;
        struct css_task_iter it;
        struct task_struct *tsk;

        /* it should be a kernfs_node belonging to cgroupfs and a directory */
        if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
            kernfs_type(kn) != KERNFS_DIR)
                return -EINVAL;

        mutex_lock(&cgroup_mutex);

        /*
         * We aren't being called from kernfs and there's no guarantee on
         * @kn->priv's validity. For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe. Let's do the RCU dancing.
         */
        rcu_read_lock();
        cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
        if (!cgrp || cgroup_is_dead(cgrp)) {
                rcu_read_unlock();
                mutex_unlock(&cgroup_mutex);
                return -ENOENT;
        }
        rcu_read_unlock();

        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                switch (tsk->state) {
                case TASK_RUNNING:
                        stats->nr_running++;
                        break;
                case TASK_INTERRUPTIBLE:
                        stats->nr_sleeping++;
                        break;
                case TASK_UNINTERRUPTIBLE:
                        stats->nr_uninterruptible++;
                        break;
                case TASK_STOPPED:
                        stats->nr_stopped++;
                        break;
                default:
                        if (delayacct_is_task_waiting_on_io(tsk))
                                stats->nr_io_wait++;
                        break;
                }
        }
        css_task_iter_end(&it);

        mutex_unlock(&cgroup_mutex);
        return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
        if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
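 * A typical agent is a short shell script. For example, assuming the
 * hierarchy is mounted at /sys/fs/cgroup/<name>, an agent might simply
 * do:
 *
 *      #!/bin/sh
 *      rmdir /sys/fs/cgroup/<name>$1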
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
        char *pathbuf = NULL, *agentbuf = NULL;
        char *argv[3], *envp[3];
        int ret;

        mutex_lock(&cgroup_mutex);

        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
        if (!pathbuf || !agentbuf)
                goto out;

        spin_lock_irq(&css_set_lock);
        ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        spin_unlock_irq(&css_set_lock);
        if (ret < 0 || ret >= PATH_MAX)
                goto out;

        argv[0] = agentbuf;
        argv[1] = pathbuf;
        argv[2] = NULL;

        /* minimal command environment */
        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        mutex_unlock(&cgroup_mutex);
        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
        goto out_free;
out:
        mutex_unlock(&cgroup_mutex);
out_free:
        kfree(agentbuf);
        kfree(pathbuf);
}

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
                          const char *new_name_str)
{
        struct cgroup *cgrp = kn->priv;
        int ret;

        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
                return -EIO;

        /*
         * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref. kernfs_rename() doesn't require active_ref
         * protection. Break them before grabbing cgroup_mutex.
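         * Protection is restored in reverse order below, after
         * cgroup_mutex has been dropped.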
         */
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);

        mutex_lock(&cgroup_mutex);

        ret = kernfs_rename(kn, new_parent, new_name_str);
        if (!ret)
                TRACE_CGROUP_PATH(rename, cgrp);

        mutex_unlock(&cgroup_mutex);

        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
        return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_subsys *ss;
        int ssid;

        for_each_subsys(ss, ssid)
                if (root->subsys_mask & (1 << ssid))
                        seq_show_option(seq, ss->legacy_name, NULL);
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
                seq_puts(seq, ",xattr");
        if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
                seq_puts(seq, ",cpuset_v2_mode");

        spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
                seq_show_option(seq, "release_agent",
                                root->release_agent_path);
        spin_unlock(&release_agent_path_lock);

        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_show_option(seq, "name", root->name);
        return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
        char *token, *o = data;
        bool all_ss = false, one_ss = false;
        u16 mask = U16_MAX;
        struct cgroup_subsys *ss;
        int nr_opts = 0;
        int i;

#ifdef CONFIG_CPUSETS
        mask = ~((u16)1 << cpuset_cgrp_id);
#endif

        memset(opts, 0, sizeof(*opts));

        while ((token = strsep(&o, ",")) != NULL) {
                nr_opts++;

                if (!*token)
                        return -EINVAL;
                if (!strcmp(token, "none")) {
                        /* Explicitly have no subsystems */
                        opts->none = true;
                        continue;
                }
                if (!strcmp(token, "all")) {
                        /* Mutually exclusive option 'all' + subsystem name */
                        if (one_ss)
                                return -EINVAL;
                        all_ss = true;
                        continue;
                }
                if (!strcmp(token, "noprefix")) {
                        opts->flags |= CGRP_ROOT_NOPREFIX;
                        continue;
                }
                if (!strcmp(token, "clone_children")) {
                        opts->cpuset_clone_children = true;
                        continue;
                }
                if (!strcmp(token, "cpuset_v2_mode")) {
                        opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
                        continue;
                }
                if (!strcmp(token, "xattr")) {
                        opts->flags |= CGRP_ROOT_XATTR;
                        continue;
                }
                if (!strncmp(token, "release_agent=", 14)) {
                        /* Specifying two release agents is forbidden */
                        if (opts->release_agent)
                                return -EINVAL;
                        opts->release_agent =
                                kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
                        if (!opts->release_agent)
                                return -ENOMEM;
                        continue;
                }
                if (!strncmp(token, "name=", 5)) {
                        const char *name = token + 5;

                        /* blocked by boot param? */
                        if (cgroup_no_v1_named)
                                return -ENOENT;
                        /* Can't specify an empty name */
                        if (!strlen(name))
                                return -EINVAL;
                        /* Must match [\w.-]+ */
                        for (i = 0; i < strlen(name); i++) {
                                char c = name[i];
                                if (isalnum(c))
                                        continue;
                                if ((c == '.') || (c == '-') || (c == '_'))
                                        continue;
                                return -EINVAL;
                        }
                        /* Specifying two names is forbidden */
                        if (opts->name)
                                return -EINVAL;
                        opts->name = kstrndup(name,
                                              MAX_CGROUP_ROOT_NAMELEN - 1,
                                              GFP_KERNEL);
                        if (!opts->name)
                                return -ENOMEM;

                        continue;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->legacy_name))
                                continue;
                        if (!cgroup_ssid_enabled(i))
                                continue;
                        if (cgroup1_ssid_disabled(i))
                                continue;

                        /* Mutually exclusive option 'all' + subsystem name */
                        if (all_ss)
                                return -EINVAL;
                        opts->subsys_mask |= (1 << i);
                        one_ss = true;

                        break;
                }
                if (i == CGROUP_SUBSYS_COUNT)
                        return -ENOENT;
        }

        /*
         * If the 'all' option was specified, select all the subsystems;
         * otherwise, if 'none', 'name=' and subsystem name options were
         * not specified, default to 'all'.
         */
        if (all_ss || (!one_ss && !opts->none && !opts->name))
                for_each_subsys(ss, i)
                        if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
                                opts->subsys_mask |= (1 << i);

        /*
         * We either have to specify by name or by subsystems. (So all
         * empty hierarchies must have a name).
         */
        if (!opts->subsys_mask && !opts->name)
                return -EINVAL;

        /*
         * Option noprefix was introduced just for backward compatibility
         * with the old cpuset, so we allow noprefix only if mounting just
         * the cpuset subsystem.
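         * (@mask, computed above, has every subsystem bit set except
         * cpuset's, so any other selected subsystem fails the check.)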
         */
        if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
                return -EINVAL;

        /* Can't specify "none" and some subsystems */
        if (opts->subsys_mask && opts->none)
                return -EINVAL;

        return 0;
}

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
        int ret = 0;
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_sb_opts opts;
        u16 added_mask, removed_mask;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* See what subsystems are wanted */
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;

        if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
                pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
                        task_tgid_nr(current), current->comm);

        added_mask = opts.subsys_mask & ~root->subsys_mask;
        removed_mask = root->subsys_mask & ~opts.subsys_mask;

        /* Don't allow flags or name to change at remount */
        if ((opts.flags ^ root->flags) ||
            (opts.name && strcmp(opts.name, root->name))) {
                pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
                       opts.flags, opts.name ?: "", root->flags, root->name);
                ret = -EINVAL;
                goto out_unlock;
        }

        /* remounting is not allowed for populated hierarchies */
        if (!list_empty(&root->cgrp.self.children)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        ret = rebind_subsystems(root, added_mask);
        if (ret)
                goto out_unlock;

        WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

        if (opts.release_agent) {
                spin_lock(&release_agent_path_lock);
                strcpy(root->release_agent_path, opts.release_agent);
                spin_unlock(&release_agent_path_lock);
        }

        trace_cgroup_remount(root);

out_unlock:
        kfree(opts.release_agent);
        kfree(opts.name);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
        .rename = cgroup1_rename,
        .show_options = cgroup1_show_options,
        .remount_fs = cgroup1_remount,
        .mkdir = cgroup_mkdir,
        .rmdir = cgroup_rmdir,
        .show_path = cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
                             void *data, unsigned long magic,
                             struct cgroup_namespace *ns)
{
        struct super_block *pinned_sb = NULL;
        struct cgroup_sb_opts opts;
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        struct dentry *dentry;
        int i, ret;
        bool new_root = false;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* First find the desired set of subsystems */
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;

        /*
         * Destruction of a cgroup root is asynchronous, so subsystems may
         * still be dying after the previous unmount. Let's drain the
         * dying subsystems. We just need to ensure that the ones
         * unmounted previously finish dying and don't care about new ones
         * starting. Testing ref liveness is good enough.
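         * If a root is found still dying, drop cgroup_mutex, sleep a bit
         * and restart the syscall rather than waiting on a queue.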
         */
        for_each_subsys(ss, i) {
                if (!(opts.subsys_mask & (1 << i)) ||
                    ss->root == &cgrp_dfl_root)
                        continue;

                if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
                }
                cgroup_put(&ss->root->cgrp);
        }

        for_each_root(root) {
                bool name_match = false;

                if (root == &cgrp_dfl_root)
                        continue;

                /*
                 * If we asked for a name then it must match. Also, if
                 * name matches but subsys_mask doesn't, we should fail.
                 * Remember whether name matched.
                 */
                if (opts.name) {
                        if (strcmp(opts.name, root->name))
                                continue;
                        name_match = true;
                }

                /*
                 * If we asked for subsystems (or explicitly for no
                 * subsystems) then they must match.
                 */
                if ((opts.subsys_mask || opts.none) &&
                    (opts.subsys_mask != root->subsys_mask)) {
                        if (!name_match)
                                continue;
                        ret = -EBUSY;
                        goto out_unlock;
                }

                if (root->flags ^ opts.flags)
                        pr_warn("new mount options do not match the existing superblock, will be ignored\n");

                /*
                 * We want to reuse @root whose lifetime is governed by its
                 * ->cgrp. Let's check whether @root is alive and keep it
                 * that way. As cgroup_kill_sb() can happen anytime, we
                 * want to block it by pinning the sb so that @root doesn't
                 * get killed before mount is complete.
                 *
                 * With the sb pinned, tryget_live can reliably indicate
                 * whether @root can be reused. If it's being killed,
                 * drain it. We could use a wait_queue for the wait, but
                 * this path is super cold. Let's just sleep a bit and
                 * retry.
                 */
                pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
                if (IS_ERR(pinned_sb) ||
                    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        if (!IS_ERR_OR_NULL(pinned_sb))
                                deactivate_super(pinned_sb);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
                }

                ret = 0;
                goto out_unlock;
        }

        /*
         * No such thing, create a new one. name= matching without subsys
         * specification is allowed for already existing hierarchies but we
         * can't create a new one without subsys specification.
         */
        if (!opts.subsys_mask && !opts.none) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /* Hierarchies may only be created in the initial cgroup namespace. */
        if (ns != &init_cgroup_ns) {
                ret = -EPERM;
                goto out_unlock;
        }

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root) {
                ret = -ENOMEM;
                goto out_unlock;
        }
        new_root = true;

        init_cgroup_root(root, &opts);

        ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
        if (ret)
                cgroup_free_root(root);

out_unlock:
        mutex_unlock(&cgroup_mutex);
out_free:
        kfree(opts.release_agent);
        kfree(opts.name);

        if (ret)
                return ERR_PTR(ret);

        dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
                                 CGROUP_SUPER_MAGIC, ns);

        /*
         * There's a race window after we release cgroup_mutex and before
         * allocating a superblock. Make sure a concurrent process won't
         * be able to re-use the root during this window by delaying the
         * initialization of root refcnt.
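         * cgroup_setup_root() above was called with PERCPU_REF_INIT_DEAD,
         * so percpu_ref_tryget_live() on the refcnt fails until the
         * percpu_ref_reinit() below.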
         */
        if (new_root) {
                mutex_lock(&cgroup_mutex);
                percpu_ref_reinit(&root->cgrp.self.refcnt);
                mutex_unlock(&cgroup_mutex);
        }

        /*
         * If @pinned_sb, we're reusing an existing root and holding an
         * extra ref on its sb. Mount is complete. Put the extra ref.
         */
        if (pinned_sb)
                deactivate_super(pinned_sb);

        return dentry;
}

static int __init cgroup1_wq_init(void)
{
        /*
         * Used to destroy pidlists and is kept separate to serve as the
         * flush domain. Cap @max_active to 1 too.
         */
        cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
                                                    0, 1);
        BUG_ON(!cgroup_pidlist_destroy_wq);
        return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                if (!strcmp(token, "all")) {
                        cgroup_no_v1_mask = U16_MAX;
                        continue;
                }

                if (!strcmp(token, "named")) {
                        cgroup_no_v1_named = true;
                        continue;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;

                        cgroup_no_v1_mask |= 1 << i;
                }
        }
        return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
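
/*
 * Example boot parameters, as parsed by cgroup_no_v1() above:
 *
 *      cgroup_no_v1=memory,cpu   block the memory and cpu controllers
 *                                from v1 hierarchies
 *      cgroup_no_v1=all,named    block every controller and disable
 *                                named v1 hierarchies
 */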