// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
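/*
 * Illustrative caller sketch (not part of this file): a driver that spawns
 * a kernel worker and wants it accounted against the cgroups of the issuing
 * task can do, in the style of vhost:
 *
 *	worker = kthread_create(worker_fn, data, "my-worker");
 *	if (!IS_ERR(worker) && cgroup_attach_task_all(current, worker))
 *		... stop the worker and bail out ...
 */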
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that a child forked while its parent is being migrated ends
 * up either visible in the source cgroup after the parent's migration
 * completes or attached to the target cgroup.  No task can slip out of
 * migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of pids (tgids for the procs file) */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};
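/*
 * Example of the keying above: two readers of the same cgroup's "tasks"
 * file that live in different pid namespaces get two separate pidlists,
 * keyed { CGROUP_FILE_TASKS, ns_a } and { CGROUP_FILE_TASKS, ns_b },
 * because the same tasks map to different pid values in each namespace.
 */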
/*
 * Used to destroy all pidlists still lingering on their destroy timers.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
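/*
 * Worked example for pidlist_uniq() above: a sorted input of
 * { 3, 3, 5, 7, 7, 7, 9 } is compacted in place so that the first four
 * entries become { 3, 5, 7, 9 } and the function returns 4.  The tail
 * beyond the returned length is left as-is and ignored by the callers.
 */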
/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one on demand.  The caller must be holding
 * cgrp->pidlist_mutex; returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}
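/*
 * Example of the procs/tasks distinction above: a thread group with pids
 * { 42, 43, 44 } contributes three entries to the "tasks" pidlist but,
 * after the sort + pidlist_uniq() pass, only the single entry 42 (its
 * tgid) to the "cgroup.procs" pidlist.
 */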
/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open.  If the matching pidlist is around, we can use
	 * that.  Look for it.  Note that @ctx->procs1.pidlist can't be used
	 * directly.  It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}
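/*
 * Example of the restart logic above: with a pidlist of { 3, 5, 9 }, a
 * read() that stopped after showing pid 5 restarts with *pos == 6; the
 * binary search then lands on index 2 and output resumes at pid 9, even
 * if the pidlist was rebuilt and entries shifted in the meantime.
 */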
static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.  Check permissions
	 * using the credentials from file open to protect against
	 * inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}
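/*
 * Userspace example for the clone_children knob above (illustrative):
 *
 *	# echo 1 > /sys/fs/cgroup/cpuset/parent/cgroup.clone_children
 *	# mkdir /sys/fs/cgroup/cpuset/parent/child
 *
 * "child" then starts with a copy of parent's cpuset configuration.  The
 * flag is only honoured by the cpuset controller.
 */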
/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Grab the subsystems state racily.  No need to add an avenue for
	 * cgroup_mutex contention.
	 */

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* @kn should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (tsk->in_iowait)
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	cgroup_put(cgrp);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}
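/*
 * Example of the release sequence: with notify_on_release enabled on
 * /sys/fs/cgroup/memory/a/b and release_agent set to /sbin/cgroup-release
 * (a hypothetical path), the last task leaving "b" (with no online
 * children left) eventually results in the equivalent of:
 *
 *	/sbin/cgroup-release /a/b
 *
 * where the argument is the cgroup path relative to the hierarchy root,
 * as built by cgroup1_release_agent() below.
 */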
/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
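/*
 * A minimal release agent (illustrative, not shipped with the kernel):
 *
 *	#!/bin/sh
 *	# $1 is the cgroup path relative to the hierarchy root
 *	rmdir "/sys/fs/cgroup/memory$1"
 *
 * Anything heavier should keep in mind that one agent instance may be
 * spawned per released cgroup and that the kernel only waits for the
 * execve() (UMH_WAIT_EXEC), not for the agent to finish.
 */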
/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};
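/*
 * Example mounts exercising these parameters (illustrative):
 *
 *	mount -t cgroup -o cpu,cpuacct cgroup /mnt/cpu
 *	mount -t cgroup -o none,name=systemd cgroup /mnt/named
 *	mount -t cgroup -o all,cpuset_v2_mode cgroup /mnt/all
 *
 * Subsystem names such as "cpu" are not in the table above; fs_parse()
 * rejects them with -ENOPARAM and cgroup1_parse_param() below matches
 * them against the registered subsystems instead.
 */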
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}
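/*
 * Examples of option sets as seen by check_cgroupfs_options() below
 * (illustrative):
 *
 *	(no options)		-> all_ss, i.e. all enabled v1 controllers
 *	"none,name=foo"		-> named hierarchy with no controllers
 *	"all,cpu"		-> rejected, "all" excludes explicit names
 *	"noprefix,cpu"		-> rejected, noprefix is cpuset-only
 */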
static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In the absence of 'none', 'name=' and subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems.  (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};
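/*
 * Remount example for cgroup1_reconfigure() above (illustrative):
 *
 *	mount -o remount,release_agent=/sbin/agent cgroup /mnt/cpu
 *
 * succeeds (with a deprecation warning) on a hierarchy whose root has no
 * child cgroups; any remount of a populated hierarchy, including one that
 * tries to add or drop controllers, fails with -EBUSY.
 */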
/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to cgroup_root into the given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}
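/*
 * The positive return value above propagates a retry: if the root picked
 * by cgroup1_root_to_use() is still dying from a previous unmount,
 * cgroup1_get_tree() sleeps 10ms and restarts the mount(2) syscall, by
 * which time the old root has normally finished dying and a fresh one can
 * be found or created.
 */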
static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
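/*
 * Boot-time examples for the parser above:
 *
 *	cgroup_no_v1=all		disable every v1 controller
 *	cgroup_no_v1=named		disable named v1 hierarchies
 *	cgroup_no_v1=memory,cpu		disable the listed controllers only
 *
 * Controllers disabled here remain usable on the v2 (unified) hierarchy.
 */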