// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cpus_read_lock();
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
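/*
 * Illustrative only: a module that spawns a kernel thread on behalf of a
 * userspace task can use cgroup_attach_task_all() to mirror that task's
 * cgroup memberships onto the new thread, e.g. (hypothetical driver code):
 *
 *	worker = kthread_create(worker_fn, dev, "my-worker");
 *	if (!IS_ERR(worker))
 *		err = cgroup_attach_task_all(current, worker);
 *
 * vhost has used this pattern so that its worker threads are charged to
 * the cgroups of the process that owns the device.
 */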
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * ends up either visible in the source cgroup after the parent's
 * migration is complete or put into the target cgroup.  No task can slip
 * out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
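/*
 * For example, cpuset (in its v1 mode) relies on cgroup_transfer_tasks()
 * to evacuate tasks from a cpuset whose CPUs or memory nodes have all
 * gone offline, moving them into an ancestor that still has resources.
 */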
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists still lingering on the destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid array, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
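/*
 * Worked example: for the sorted input {3, 3, 5, 7, 7, 7}, pidlist_uniq()
 * compacts the array in place so it begins {3, 5, 7, ...} and returns 3;
 * only the first 'dest' entries are meaningful afterwards.
 */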
/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  The caller must hold
 * cgrp->pidlist_mutex; returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * A non-NULL @ctx->procs1.pidlist indicates that this isn't the
	 * first start() after open.  If the matching pidlist is still
	 * around, we can use it.  Look for it.  Note that
	 * @ctx->procs1.pidlist can't be dereferenced directly - it could
	 * already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}
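/*
 * Worked example of the position protocol: with a pidlist of {3, 5, 9},
 * start() with *pos == 0 returns the slot for 3, and each next() advances
 * *pos to the pid it returns (5, then 9).  If a later read() enters
 * start() with *pos == 9 after the cached pidlist expired and was rebuilt,
 * the binary search lands on the smallest pid >= 9, so the stream resumes
 * without repeating pids that were already shown.
 */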
static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.  Check permissions
	 * using the credentials from file open to protect against
	 * inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
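/*
 * From userspace (illustrative shell, assuming a mounted v1 hierarchy):
 *
 *	echo $$ > /sys/fs/cgroup/cpu/mygroup/cgroup.procs	# whole group
 *	echo $TID > /sys/fs/cgroup/cpu/mygroup/tasks		# one thread
 *
 * Writing to "cgroup.procs" moves the entire thread group of the given
 * TGID, while "tasks" moves only the single thread with the given TID.
 */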
static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities, so require
	 * capabilities to set the release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Grab the subsystems state racily.  No need to add an avenue for
	 * cgroup_mutex contention.
	 */
	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	return 0;
}
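/*
 * Sample /proc/cgroups output (values are illustrative):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		2		1		1
 *	cpu		3		12		1
 *	memory		4		153		1
 */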
/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* the kernfs_node should belong to cgroupfs and be a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (tsk->in_iowait)
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	cgroup_put(cgrp);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
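/*
 * Illustrative sketch of a matching userspace agent: if release_agent is
 * set to /usr/local/sbin/cgroup-release (a hypothetical path), the kernel
 * runs it as
 *
 *	/usr/local/sbin/cgroup-release /mygroup/child
 *
 * and a minimal agent might simply do:
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/cpu$1"	# mount point is an assumption
 */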
/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");
	if (root->flags & CGRP_ROOT_FAVOR_DYNMODS)
		seq_puts(seq, ",favordynmods");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
	Opt_favordynmods,
	Opt_nofavordynmods,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	fsparam_flag  ("favordynmods",	Opt_favordynmods),
	fsparam_flag  ("nofavordynmods", Opt_nofavordynmods),
	{}
};

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_favordynmods:
		ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_nofavordynmods:
		ctx->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * Release agent gets called with all capabilities, so
		 * require capabilities to set the release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}
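/*
 * Typical mounts parsed by the above (paths and names are illustrative):
 *
 *	mount -t cgroup -o cpu,cpuacct cgroup /sys/fs/cgroup/cpu,cpuacct
 *	mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd
 *
 * The first binds the cpu and cpuacct controllers to one hierarchy; the
 * second creates a named hierarchy with no controllers attached.
 */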
static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' and subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems.  (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}
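/*
 * Examples of how these checks play out (option strings only):
 *
 *	"cpu,all"	-> rejected, 'all' conflicts with subsys names
 *	"none"		-> rejected, an empty hierarchy needs name=
 *	"none,name=foo"	-> accepted, named hierarchy, no controllers
 *	"noprefix,cpu"	-> rejected, noprefix is cpuset-only
 */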
int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create the cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error,
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to the cgroup_root into the given
 * cgroup_fs_context; that reference is *NOT* counted in the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of a cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (!ret)
		cgroup_favor_dynmods(root, ctx->flags & CGRP_ROOT_FAVOR_DYNMODS);
	else
		cgroup_free_root(root);

	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
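/*
 * Note on the positive return: if a mount races with the asynchronous
 * teardown of the same hierarchy, cgroup1_get_tree() backs off for 10ms
 * and restarts the mount(2) syscall instead of failing, so userspace
 * simply sees the mount complete once the old root has finished dying.
 */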
static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
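/*
 * Example kernel command lines parsed by the above:
 *
 *	cgroup_no_v1=memory,perf_event	- block these controllers from v1
 *	cgroup_no_v1=all		- block all controllers from v1
 *	cgroup_no_v1=named		- disable named v1 hierarchies only
 */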