/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "cgroup-internal.h"

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/psi.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>

#define CGROUP_FILE_NAME_MAX	(MAX_CGROUP_TYPE_NAMELEN +	\
				 MAX_CFTYPE_NAME + 2)
/* let's not notify more than 100 times per second */
#define CGROUP_FILE_NOTIFY_MIN_INTV	DIV_ROUND_UP(HZ, 100)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);

#ifdef CONFIG_PROVE_RCU
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif

DEFINE_SPINLOCK(trace_cgroup_path_lock);
char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
bool cgroup_debug __read_mostly;

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

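/*
 * Illustrative usage sketch (not a real call site): when both of the main
 * locks are needed, cgroup_mutex nests outside css_set_lock, and
 * css_set_lock is taken irq-safe, as in find_css_set() below:
 *
 *	mutex_lock(&cgroup_mutex);
 *	spin_lock_irq(&css_set_lock);
 *	... inspect or update task->cgroups and the css_set lists ...
 *	spin_unlock_irq(&css_set_lock);
 *	mutex_unlock(&cgroup_mutex);
 */
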
struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			   !lockdep_is_held(&cgroup_mutex),		\
			   "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)							\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);		\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

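/*
 * Illustration (sketch, not generated output): <linux/cgroup_subsys.h>
 * invokes SUBSYS(name) once per compiled-in controller, so with e.g. the
 * cpu controller enabled the first array above expands to roughly:
 *
 *	struct cgroup_subsys *cgroup_subsys[] = {
 *		[cpu_cgrp_id] = &cpu_cgrp_subsys,
 *		...
 *	};
 */
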
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static u16 cgrp_dfl_implicit_ss_mask;

/* some controllers can be threaded on the default hierarchy */
static u16 cgrp_dfl_threaded_ss_mask;

/* The list of hierarchy roots */
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmasks identify subsystems with specific features to avoid
 * having to do iterative checks repeatedly.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_release_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
	.count		= REFCOUNT_INIT(2),
	.user_ns	= &init_user_ns,
	.ns.ops		= &cgroupns_operations,
	.ns.inum	= PROC_CGROUP_INIT_INO,
	.root_cset	= &init_css_set,
};

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_base_files[];

static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names,
 * which is fine for individual subsystems but unsuitable for cgroup core.
 * This is a slower static_key_enabled() based test indexed by @ssid.
 */
bool cgroup_ssid_enabled(int ssid)
{
	if (CGROUP_SUBSYS_COUNT == 0)
		return false;

	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

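/*
 * Illustrative usage sketch: core code that only has a subsystem ID at
 * hand can gate work on this helper, e.g.:
 *
 *	for_each_subsys(ss, ssid)
 *		if (cgroup_ssid_enabled(ssid))
 *			... consider @ss ...
 */
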
/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

static bool cgroup_has_tasks(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets;
}

bool cgroup_is_threaded(struct cgroup *cgrp)
{
	return cgrp->dom_cgrp != cgrp;
}

/* can @cgrp host both domain and threaded children? */
static bool cgroup_is_mixable(struct cgroup *cgrp)
{
	/*
	 * Root isn't under domain level resource control exempting it from
	 * the no-internal-process constraint, so it can serve as a thread
	 * root and a parent of resource domains at the same time.
	 */
	return !cgroup_parent(cgrp);
}

/* can @cgrp become a thread root? should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
	/* mixables don't care */
	if (cgroup_is_mixable(cgrp))
		return true;

	/* domain roots can't be nested under threaded */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* can only have either domain or threaded children */
	if (cgrp->nr_populated_domain_children)
		return false;

	/* and no domain controllers can be enabled */
	if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
		return false;

	return true;
}

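/*
 * Illustration (sketch): a v2 subtree using threaded mode looks like
 *
 *	R (root, mixable)
 *	`- D (domain)
 *	   `- T (thread root: a domain with threaded children)
 *	      `- t1, t2 (threaded cgroups, dom_cgrp == T)
 *
 * The threaded-mode predicates in this block enforce that threaded
 * cgroups hang off a single thread root and that domain controllers are
 * not enabled past it.
 */
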
/* is @cgrp root of a threaded subtree? */
bool cgroup_is_thread_root(struct cgroup *cgrp)
{
	/* thread root should be a domain */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* a domain w/ threaded children is a thread root */
	if (cgrp->nr_threaded_children)
		return true;

	/*
	 * A domain which has tasks and explicit threaded controllers
	 * enabled is a thread root.
	 */
	if (cgroup_has_tasks(cgrp) &&
	    (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
		return true;

	return false;
}

/* a domain which isn't connected to the root w/o breakage can't be used */
static bool cgroup_is_valid_domain(struct cgroup *cgrp)
{
	/* the cgroup itself can be a thread root */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* but the ancestors can't be unless mixable */
	while ((cgrp = cgroup_parent(cgrp))) {
		if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
			return false;
		if (cgroup_is_threaded(cgrp))
			return false;
	}

	return true;
}

/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	u16 root_ss_mask = cgrp->root->subsys_mask;

	if (parent) {
		u16 ss_mask = parent->subtree_control;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	if (cgroup_on_dfl(cgrp))
		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
				  cgrp_dfl_implicit_ss_mask);
	return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);

	if (parent) {
		u16 ss_mask = parent->subtree_ss_mask;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	return cgrp->root->subsys_mask;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
 * or is offline, %NULL is returned.
 */
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
						     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (!css || !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();

	return css;
}

/**
 * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
							struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Test ss_mask.
	 */
	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
		cgrp = cgroup_parent(cgrp);
		if (!cgrp)
			return NULL;
	}

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 *
 * The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to tryget a reference for it.
 */
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
					 struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	do {
		css = cgroup_css(cgrp, ss);

		if (css)
			return css;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	return init_css_set.subsys[ss->id];
}

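/*
 * Worked example (illustration): on the default hierarchy with cgroups
 * A -> B -> C, if @ss is enabled in A's subtree_control but not in B's,
 * then B has a css for @ss while C does not; cgroup_e_css(C, ss) walks up
 * from C and returns B's css.  If @ss is enabled nowhere on the path, the
 * root css from init_css_set is returned instead.
 */
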
/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}

static void cgroup_get_live(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

/**
 * __cgroup_task_count - count the number of tasks in a cgroup. The caller
 * is responsible for taking the css_set_lock.
 * @cgrp: the cgroup in question
 */
int __cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;

	return count;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count;

	spin_lock_irq(&css_set_lock);
	count = __cgroup_task_count(cgrp);
	spin_unlock_irq(&css_set_lock);

	return count;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated with a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

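/*
 * Usage sketch: the iterator above is used like
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		kill_css(css);
 *
 * skipping subsystems which have no css on @cgrp (see
 * cgroup_destroy_locked() for a real call site).
 */
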
/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					    \
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	    \
		if (!((css) = cgroup_e_css_by_mask(cgrp,		    \
						   cgroup_subsys[(ssid)]))) \
			;						    \
		else

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
	unsigned long __ss_mask = (ss_mask);				\
	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
		(ssid) = 0;						\
		break;							\
	}								\
	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
		(ss) = cgroup_subsys[ssid];				\
		{

#define while_each_subsys_mask()					\
		}							\
	}								\
} while (false)

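/*
 * Usage sketch: the pair above brackets a block which runs once per
 * subsystem whose bit is set in the mask, as in rebind_subsystems()
 * below:
 *
 *	do_each_subsys_mask(ss, ssid, ss_mask) {
 *		... use ss and ssid ...
 *	} while_each_subsys_mask();
 */
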
/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* walk live descendants in preorder */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)	\
	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted.  It contains a pointer to the root state
 * for each subsystem.  Also used to anchor the list of css_sets.  Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= REFCOUNT_INIT(1),
	.dom_cset		= &init_css_set,
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
	.threaded_csets		= LIST_HEAD_INIT(init_css_set.threaded_csets),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),

	/*
	 * The following field is re-initialized when this cset gets linked
	 * in cgroup_init().  However, let's initialize the field
	 * statically too so that the default cgroup can be accessed safely
	 * early during boot.
	 */
	.dfl_cgrp		= &cgrp_dfl_root.cgrp,
};

static int css_set_count	= 1;	/* 1 for init_css_set */

static bool css_set_threaded(struct css_set *cset)
{
	return cset->dom_cset != cset;
}

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 *
 * css_set_populated() should be the same as !!cset->nr_tasks at steady
 * state.  However, css_set_populated() can be called while a task is being
 * added to or removed from the linked list before the nr_tasks is
 * properly updated.  Hence, we can't just look at ->nr_tasks here.
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

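/*
 * Userspace-side illustration: the populated state maintained by the
 * helpers below surfaces on the default hierarchy as the "populated"
 * field of the "cgroup.events" interface file, and each flip generates a
 * kernfs notification, so a manager can poll() or inotify-watch that file
 * to learn when a subtree gains its first task or loses its last one.
 */
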
/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->nr_populated_* accordingly.  The
 * count is propagated towards root so that a given cgroup's
 * nr_populated_children is zero iff none of its descendants contain any
 * tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if both
 * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
 * 1 otherwise.  When the sum changes from or to zero, userland is notified
 * that the content of the interface file has changed.  This can be used to
 * detect when @cgrp and its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	struct cgroup *child = NULL;
	int adj = populated ? 1 : -1;

	lockdep_assert_held(&css_set_lock);

	do {
		bool was_populated = cgroup_is_populated(cgrp);

		if (!child) {
			cgrp->nr_populated_csets += adj;
		} else {
			if (cgroup_is_threaded(child))
				cgrp->nr_populated_threaded_children += adj;
			else
				cgrp->nr_populated_domain_children += adj;
		}

		if (was_populated == cgroup_is_populated(cgrp))
			break;

		cgroup1_check_for_release(cgrp);
		TRACE_CGROUP_PATH(notify_populated, cgrp,
				  cgroup_is_populated(cgrp));
		cgroup_file_notify(&cgrp->events_file);

		child = cgrp;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * populated counters of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

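/*
 * Worked example (illustration): take an empty chain R -> A -> B and give
 * B its first task.  cgroup_update_populated(B, true) bumps B's
 * nr_populated_csets; B flips to populated, so the walk continues to A
 * (bumping A->nr_populated_domain_children) and then to R.  The walk
 * stops at the first ancestor whose populated state does not change, so
 * further tasks entering B touch only B's counters.
 */
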
/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated counter updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (to_cset && !css_set_populated(to_cset))
		css_set_update_populated(to_cset, true);

	if (from_cset) {
		struct css_task_iter *it, *pos;

		WARN_ON_ONCE(list_empty(&task->cg_list));

		/*
		 * @task is leaving, advance task iterators which are
		 * pointing to it so that they can resume at the next
		 * position.  Advancing an iterator might remove it from
		 * the list, use safe walk.  See css_task_iter_advance*()
		 * for details.
		 */
		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
					 iters_node)
			if (it->task_pos == &task->cg_list)
				css_task_iter_advance(it);

		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit() changing the css_set to
		 * init_css_set and dropping the old one.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		cgroup_move_task(task, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for css_sets.  This speeds up finding an existing css_set.
 * This hash doesn't (currently) take into account cgroups in empty
 * hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!refcount_dec_and_test(&cset->refcount))
		return;

	WARN_ON_ONCE(!list_empty(&cset->threaded_csets));

	/* This css_set is dead.  Unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	if (css_set_threaded(cset)) {
		list_del(&cset->threaded_csets_node);
		put_css_set_locked(cset->dom_cset);
	}

	kfree_rcu(cset, rcu_head);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct cgroup *new_dfl_cgrp;
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/* @cset's domain should match the default cgroup's */
	if (cgroup_on_dfl(new_cgrp))
		new_dfl_cgrp = new_cgrp;
	else
		new_dfl_cgrp = old_cset->dfl_cgrp;

	if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					     struct cgroup *cgrp,
					     struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css_by_mask(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get_live(cgrp);
}

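/*
 * Usage sketch: callers pre-allocate the worst-case number of links while
 * sleeping is still allowed, then hand the list to link_css_set() which
 * consumes one entry per call, as find_css_set() below does:
 *
 *	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0)
 *		return NULL;
 *	spin_lock_irq(&css_set_lock);
 *	link_css_set(&tmp_links, cset, c);	// once per hierarchy
 *	spin_unlock_irq(&css_set_lock);
 *	BUG_ON(!list_empty(&tmp_links));
 */
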
/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * First see if we already have a cgroup group that matches
	 * the desired set.
	 */
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_irq(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	refcount_set(&cset->refcount, 1);
	cset->dom_cset = cset;
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_LIST_HEAD(&cset->threaded_csets);
	INIT_HLIST_NODE(&cset->hlist);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->mg_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);

	/*
	 * Copy the set of subsystem state objects generated in
	 * find_existing_css_set().
	 */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_irq(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_irq(&css_set_lock);

	/*
	 * If @cset should be threaded, look up the matching dom_cset and
	 * link them up.  We first fully initialize @cset then look for the
	 * dom_cset.  It's simpler this way and safe as @cset is guaranteed
	 * to stay empty until we return.
	 */
	if (cgroup_is_threaded(cset->dfl_cgrp)) {
		struct css_set *dcset;

		dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
		if (!dcset) {
			put_css_set(cset);
			return NULL;
		}

		spin_lock_irq(&css_set_lock);
		cset->dom_cset = dcset;
		list_add_tail(&cset->threaded_csets_node,
			      &dcset->threaded_csets);
		spin_unlock_irq(&css_set_lock);
	}

	return cset;
}

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kf_root->kn->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}

void cgroup_free_root(struct cgroup_root *root)
{
	if (root) {
		idr_destroy(&root->cgroup_idr);
		kfree(root);
	}
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	trace_cgroup_destroy_root(root);

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_irq(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_irq(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/*
 * look up cgroup associated with current task's cgroup namespace on the
 * specified hierarchy
 */
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
	struct cgroup *res = NULL;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	rcu_read_lock();

	cset = current->nsproxy->cgroup_ns->root_cset;
	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	rcu_read_unlock();

	BUG_ON(!res);
	return res;
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else if (root == &cgrp_dfl_root) {
		res = cset->dfl_cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy.  Must be
 * called with cgroup_mutex and css_set_lock held.
 */
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  A count of zero
 * means that no tasks are currently attached, so there is no way a
 * task attached to that cgroup can fork (the other way to increment
 * the count).  So code holding cgroup_mutex can safely assume that if
 * the count is zero, it will stay zero.  Similarly, if a task holds
 * cgroup_mutex on a cgroup with zero count, it knows that the cgroup
 * won't be removed, as cgroup_rmdir() needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, the root
 * cgroup always has either child cgroups and/or using tasks.  So we
 * don't need a special hack to ensure that the root cgroup cannot be
 * deleted.
 *
 * P.S. One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task().
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;

static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf, bool write_link_name)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";

		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
			 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 write_link_name ? cft->link_name : cft->name);
	} else {
		strscpy(buf, write_link_name ? cft->link_name : cft->name,
			CGROUP_FILE_NAME_MAX);
	}
	return buf;
}

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	return cgroup_fill_name(cgrp, cft, buf, false);
}

static char *cgroup_link_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	return cgroup_fill_name(cgrp, cft, buf, true);
}

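/*
 * Worked example (illustration): for a controller whose name is "memory"
 * and a cftype named "max", cgroup_file_name() produces "memory.max";
 * with CFTYPE_DEBUG set it becomes ".__DEBUG__.memory.max", and with
 * CFTYPE_NO_PREFIX (or a CGRP_ROOT_NOPREFIX mount) just "max".
 */
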
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @subtree_control: the new subtree_control mask to consider
 * @this_ss_mask: available subsystems
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied while restricted to @this_ss_mask.
 */
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
	u16 cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;

	while (true) {
		u16 new_ss_mask = cur_ss_mask;

		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
			new_ss_mask |= ss->depends_on;
		} while_each_subsys_mask();

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		new_ss_mask &= this_ss_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}

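/*
 * Worked example (illustration): suppose controller A's ->depends_on
 * names B and B's names C, with all three available in @this_ss_mask.
 * Starting from subtree_control = {A}, the first pass adds B, the second
 * adds C, and the third changes nothing, so the loop settles on
 * {A, B, C}.  If C were bound to a non-default hierarchy it would be
 * masked back out each pass and the result would be {A, B}.
 */
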
/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 * @drain_offline: perform offline draining on the cgroup
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
 * cgroup is drained of offlining csses before return.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	if (drain_offline)
		cgroup_lock_and_drain_offline(cgrp);
	else
		mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}

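/*
 * Usage sketch: a write handler on a cgroup file typically brackets its
 * work with this pair:
 *
 *	cgrp = cgroup_kn_lock_live(of->kn, true);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on cgrp under cgroup_mutex ...
 *	cgroup_kn_unlock(of->kn);
 */
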
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);

	if (cft->file_offset) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);

		del_timer_sync(&cfile->notify_timer);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
	if (cft->flags & CFTYPE_SYMLINKED)
		kernfs_remove_by_name(cgrp->kn,
				      cgroup_link_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 */
static void css_clear_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts;

	if (!(css->flags & CSS_VISIBLE))
		return;

	css->flags &= ~CSS_VISIBLE;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		cgroup_addrm_files(css, cgrp, cfts, false);
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node)
			cgroup_addrm_files(css, cgrp, cfts, false);
	}
}

/**
 * css_populate_dir - create subsys files in a cgroup directory
 * @css: target css
 *
 * On failure, no file is added.
 */
static int css_populate_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts, *failed_cfts;
	int ret;

	if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
		return 0;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		ret = cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
		if (ret < 0)
			return ret;
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node) {
			ret = cgroup_addrm_files(css, cgrp, cfts, true);
			if (ret < 0) {
				failed_cfts = cfts;
				goto err;
			}
		}
	}

	css->flags |= CSS_VISIBLE;

	return 0;
err:
	list_for_each_entry(cfts, &css->ss->cfts, node) {
		if (cfts == failed_cfts)
			break;
		cgroup_addrm_files(css, cgrp, cfts, false);
	}
	return ret;
}

int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_mutex);

	do_each_subsys_mask(ss, ssid, ss_mask) {
		/*
		 * If @ss has non-root csses attached to it, can't move.
		 * If @ss is an implicit controller, it is exempt from this
		 * rule and can be stolen.
		 */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
		    !ss->implicit_on_dfl)
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	} while_each_subsys_mask();

	do_each_subsys_mask(ss, ssid, ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		/* disable from the source */
		src_root->subsys_mask &= ~(1 << ssid);
		WARN_ON(cgroup_apply_control(scgrp));
		cgroup_finalize_control(scgrp, 0);

		/* rebind */
		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_irq(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_irq(&css_set_lock);

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		ret = cgroup_apply_control(dcgrp);
		if (ret)
			pr_warn("partial failure to rebind %s controller (err=%d)\n",
				ss->name, ret);

		if (ss->bind)
			ss->bind(css);
	} while_each_subsys_mask();

	kernfs_activate(dcgrp->kn);
	return 0;
}

int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root)
{
	int len = 0;
	char *buf = NULL;
	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
	struct cgroup *ns_cgroup;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_irq(&css_set_lock);

	if (len >= PATH_MAX)
		len = -ERANGE;
	else if (len > 0) {
		seq_escape(sf, buf, " \t\n\\");
		len = 0;
	}
	kfree(buf);
	return len;
}

enum cgroup2_param {
	Opt_nsdelegate,
	Opt_memory_localevents,
	nr__cgroup2_params
};

static const struct fs_parameter_spec cgroup2_param_specs[] = {
	fsparam_flag("nsdelegate",		Opt_nsdelegate),
	fsparam_flag("memory_localevents",	Opt_memory_localevents),
	{}
};

static const struct fs_parameter_description cgroup2_fs_parameters = {
	.name		= "cgroup2",
	.specs		= cgroup2_param_specs,
};

static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, &cgroup2_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_nsdelegate:
		ctx->flags |= CGRP_ROOT_NS_DELEGATE;
		return 0;
	case Opt_memory_localevents:
		ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
		return 0;
	}
	return -EINVAL;
}

static void apply_cgroup_root_flags(unsigned int root_flags)
{
	if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
		if (root_flags & CGRP_ROOT_NS_DELEGATE)
			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
		else
			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;

		if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS;
		else
			cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS;
	}
}

static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
		seq_puts(seq, ",nsdelegate");
	if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
		seq_puts(seq, ",memory_localevents");
	return 0;
}

static int cgroup_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);

	apply_cgroup_root_flags(ctx->flags);
	return 0;
}

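/*
 * Userspace-side illustration: the two flags parsed above correspond to
 * cgroup2 mount options, e.g.
 *
 *	mount -t cgroup2 -o nsdelegate,memory_localevents \
 *		none /sys/fs/cgroup
 *
 * and can be toggled later by remounting; apply_cgroup_root_flags() only
 * honors them from the init cgroup namespace.
 */
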
1915 */ 1916 read_lock(&tasklist_lock); 1917 spin_lock_irq(&css_set_lock); 1918 1919 if (use_task_css_set_links) 1920 goto out_unlock; 1921 1922 use_task_css_set_links = true; 1923 1924 do_each_thread(g, p) { 1925 WARN_ON_ONCE(!list_empty(&p->cg_list) || 1926 task_css_set(p) != &init_css_set); 1927 1928 /* 1929 * We should check if the process is exiting, otherwise 1930 * it will race with cgroup_exit() in that the list 1931 * entry won't be deleted though the process has exited. 1932 * Do it while holding siglock so that we don't end up 1933 * racing against cgroup_exit(). 1934 * 1935 * Interrupts were already disabled while acquiring 1936 * the css_set_lock, so we do not need to disable it 1937 * again when acquiring the sighand->siglock here. 1938 */ 1939 spin_lock(&p->sighand->siglock); 1940 if (!(p->flags & PF_EXITING)) { 1941 struct css_set *cset = task_css_set(p); 1942 1943 if (!css_set_populated(cset)) 1944 css_set_update_populated(cset, true); 1945 list_add_tail(&p->cg_list, &cset->tasks); 1946 get_css_set(cset); 1947 cset->nr_tasks++; 1948 } 1949 spin_unlock(&p->sighand->siglock); 1950 } while_each_thread(g, p); 1951 out_unlock: 1952 spin_unlock_irq(&css_set_lock); 1953 read_unlock(&tasklist_lock); 1954 } 1955 1956 static void init_cgroup_housekeeping(struct cgroup *cgrp) 1957 { 1958 struct cgroup_subsys *ss; 1959 int ssid; 1960 1961 INIT_LIST_HEAD(&cgrp->self.sibling); 1962 INIT_LIST_HEAD(&cgrp->self.children); 1963 INIT_LIST_HEAD(&cgrp->cset_links); 1964 INIT_LIST_HEAD(&cgrp->pidlists); 1965 mutex_init(&cgrp->pidlist_mutex); 1966 cgrp->self.cgroup = cgrp; 1967 cgrp->self.flags |= CSS_ONLINE; 1968 cgrp->dom_cgrp = cgrp; 1969 cgrp->max_descendants = INT_MAX; 1970 cgrp->max_depth = INT_MAX; 1971 INIT_LIST_HEAD(&cgrp->rstat_css_list); 1972 prev_cputime_init(&cgrp->prev_cputime); 1973 1974 for_each_subsys(ss, ssid) 1975 INIT_LIST_HEAD(&cgrp->e_csets[ssid]); 1976 1977 init_waitqueue_head(&cgrp->offline_waitq); 1978 INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent); 1979 } 1980 1981 void init_cgroup_root(struct cgroup_fs_context *ctx) 1982 { 1983 struct cgroup_root *root = ctx->root; 1984 struct cgroup *cgrp = &root->cgrp; 1985 1986 INIT_LIST_HEAD(&root->root_list); 1987 atomic_set(&root->nr_cgrps, 1); 1988 cgrp->root = root; 1989 init_cgroup_housekeeping(cgrp); 1990 idr_init(&root->cgroup_idr); 1991 1992 root->flags = ctx->flags; 1993 if (ctx->release_agent) 1994 strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX); 1995 if (ctx->name) 1996 strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN); 1997 if (ctx->cpuset_clone_children) 1998 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); 1999 } 2000 2001 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) 2002 { 2003 LIST_HEAD(tmp_links); 2004 struct cgroup *root_cgrp = &root->cgrp; 2005 struct kernfs_syscall_ops *kf_sops; 2006 struct css_set *cset; 2007 int i, ret; 2008 2009 lockdep_assert_held(&cgroup_mutex); 2010 2011 ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL); 2012 if (ret < 0) 2013 goto out; 2014 root_cgrp->id = ret; 2015 root_cgrp->ancestor_ids[0] = ret; 2016 2017 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 2018 0, GFP_KERNEL); 2019 if (ret) 2020 goto out; 2021 2022 /* 2023 * We're accessing css_set_count without locking css_set_lock here, 2024 * but that's OK - it can only be increased by someone holding 2025 * cgroup_lock, and that's us. 
Later rebinding may disable 2026 * controllers on the default hierarchy and thus create new csets, 2027 * which can't be more than the existing ones. Allocate 2x. 2028 */ 2029 ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links); 2030 if (ret) 2031 goto cancel_ref; 2032 2033 ret = cgroup_init_root_id(root); 2034 if (ret) 2035 goto cancel_ref; 2036 2037 kf_sops = root == &cgrp_dfl_root ? 2038 &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; 2039 2040 root->kf_root = kernfs_create_root(kf_sops, 2041 KERNFS_ROOT_CREATE_DEACTIVATED | 2042 KERNFS_ROOT_SUPPORT_EXPORTOP, 2043 root_cgrp); 2044 if (IS_ERR(root->kf_root)) { 2045 ret = PTR_ERR(root->kf_root); 2046 goto exit_root_id; 2047 } 2048 root_cgrp->kn = root->kf_root->kn; 2049 2050 ret = css_populate_dir(&root_cgrp->self); 2051 if (ret) 2052 goto destroy_root; 2053 2054 ret = rebind_subsystems(root, ss_mask); 2055 if (ret) 2056 goto destroy_root; 2057 2058 ret = cgroup_bpf_inherit(root_cgrp); 2059 WARN_ON_ONCE(ret); 2060 2061 trace_cgroup_setup_root(root); 2062 2063 /* 2064 * There must be no failure case after here, since rebinding takes 2065 * care of subsystems' refcounts, which are explicitly dropped in 2066 * the failure exit path. 2067 */ 2068 list_add(&root->root_list, &cgroup_roots); 2069 cgroup_root_count++; 2070 2071 /* 2072 * Link the root cgroup in this hierarchy into all the css_set 2073 * objects. 2074 */ 2075 spin_lock_irq(&css_set_lock); 2076 hash_for_each(css_set_table, i, cset, hlist) { 2077 link_css_set(&tmp_links, cset, root_cgrp); 2078 if (css_set_populated(cset)) 2079 cgroup_update_populated(root_cgrp, true); 2080 } 2081 spin_unlock_irq(&css_set_lock); 2082 2083 BUG_ON(!list_empty(&root_cgrp->self.children)); 2084 BUG_ON(atomic_read(&root->nr_cgrps) != 1); 2085 2086 kernfs_activate(root_cgrp->kn); 2087 ret = 0; 2088 goto out; 2089 2090 destroy_root: 2091 kernfs_destroy_root(root->kf_root); 2092 root->kf_root = NULL; 2093 exit_root_id: 2094 cgroup_exit_root_id(root); 2095 cancel_ref: 2096 percpu_ref_exit(&root_cgrp->self.refcnt); 2097 out: 2098 free_cgrp_cset_links(&tmp_links); 2099 return ret; 2100 } 2101 2102 int cgroup_do_get_tree(struct fs_context *fc) 2103 { 2104 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2105 int ret; 2106 2107 ctx->kfc.root = ctx->root->kf_root; 2108 if (fc->fs_type == &cgroup2_fs_type) 2109 ctx->kfc.magic = CGROUP2_SUPER_MAGIC; 2110 else 2111 ctx->kfc.magic = CGROUP_SUPER_MAGIC; 2112 ret = kernfs_get_tree(fc); 2113 2114 /* 2115 * In non-init cgroup namespace, instead of root cgroup's dentry, 2116 * we return the dentry corresponding to the cgroupns->root_cgrp. 2117 */ 2118 if (!ret && ctx->ns != &init_cgroup_ns) { 2119 struct dentry *nsdentry; 2120 struct super_block *sb = fc->root->d_sb; 2121 struct cgroup *cgrp; 2122 2123 mutex_lock(&cgroup_mutex); 2124 spin_lock_irq(&css_set_lock); 2125 2126 cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root); 2127 2128 spin_unlock_irq(&css_set_lock); 2129 mutex_unlock(&cgroup_mutex); 2130 2131 nsdentry = kernfs_node_dentry(cgrp->kn, sb); 2132 dput(fc->root); 2133 fc->root = nsdentry; 2134 if (IS_ERR(nsdentry)) { 2135 ret = PTR_ERR(nsdentry); 2136 deactivate_locked_super(sb); 2137 } 2138 } 2139 2140 if (!ctx->kfc.new_sb_created) 2141 cgroup_put(&ctx->root->cgrp); 2142 2143 return ret; 2144 } 2145 2146 /* 2147 * Destroy a cgroup filesystem context. 
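* Everything the context owns - the name and release_agent copies, the cgroup namespace reference and the kernfs part of the context - is dropped here.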
2148 */ 2149 static void cgroup_fs_context_free(struct fs_context *fc) 2150 { 2151 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2152 2153 kfree(ctx->name); 2154 kfree(ctx->release_agent); 2155 put_cgroup_ns(ctx->ns); 2156 kernfs_free_fs_context(fc); 2157 kfree(ctx); 2158 } 2159 2160 static int cgroup_get_tree(struct fs_context *fc) 2161 { 2162 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2163 int ret; 2164 2165 cgrp_dfl_visible = true; 2166 cgroup_get_live(&cgrp_dfl_root.cgrp); 2167 ctx->root = &cgrp_dfl_root; 2168 2169 ret = cgroup_do_get_tree(fc); 2170 if (!ret) 2171 apply_cgroup_root_flags(ctx->flags); 2172 return ret; 2173 } 2174 2175 static const struct fs_context_operations cgroup_fs_context_ops = { 2176 .free = cgroup_fs_context_free, 2177 .parse_param = cgroup2_parse_param, 2178 .get_tree = cgroup_get_tree, 2179 .reconfigure = cgroup_reconfigure, 2180 }; 2181 2182 static const struct fs_context_operations cgroup1_fs_context_ops = { 2183 .free = cgroup_fs_context_free, 2184 .parse_param = cgroup1_parse_param, 2185 .get_tree = cgroup1_get_tree, 2186 .reconfigure = cgroup1_reconfigure, 2187 }; 2188 2189 /* 2190 * Initialise the cgroup filesystem creation/reconfiguration context. Notably, 2191 * we select the namespace we're going to use. 2192 */ 2193 static int cgroup_init_fs_context(struct fs_context *fc) 2194 { 2195 struct cgroup_fs_context *ctx; 2196 2197 ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL); 2198 if (!ctx) 2199 return -ENOMEM; 2200 2201 /* 2202 * The first time anyone tries to mount a cgroup, enable the list 2203 * linking each css_set to its tasks and fix up all existing tasks. 2204 */ 2205 if (!use_task_css_set_links) 2206 cgroup_enable_task_cg_lists(); 2207 2208 ctx->ns = current->nsproxy->cgroup_ns; 2209 get_cgroup_ns(ctx->ns); 2210 fc->fs_private = &ctx->kfc; 2211 if (fc->fs_type == &cgroup2_fs_type) 2212 fc->ops = &cgroup_fs_context_ops; 2213 else 2214 fc->ops = &cgroup1_fs_context_ops; 2215 if (fc->user_ns) 2216 put_user_ns(fc->user_ns); 2217 fc->user_ns = get_user_ns(ctx->ns->user_ns); 2218 fc->global = true; 2219 return 0; 2220 } 2221 2222 static void cgroup_kill_sb(struct super_block *sb) 2223 { 2224 struct kernfs_root *kf_root = kernfs_root_from_sb(sb); 2225 struct cgroup_root *root = cgroup_root_from_kf(kf_root); 2226 2227 /* 2228 * If @root doesn't have any children, start killing it. 2229 * This prevents new mounts by disabling percpu_ref_tryget_live(). 2230 * cgroup1_get_tree() may wait for @root's release. 2231 * 2232 * And don't kill the default root.
2233 */ 2234 if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && 2235 !percpu_ref_is_dying(&root->cgrp.self.refcnt)) 2236 percpu_ref_kill(&root->cgrp.self.refcnt); 2237 cgroup_put(&root->cgrp); 2238 kernfs_kill_sb(sb); 2239 } 2240 2241 struct file_system_type cgroup_fs_type = { 2242 .name = "cgroup", 2243 .init_fs_context = cgroup_init_fs_context, 2244 .parameters = &cgroup1_fs_parameters, 2245 .kill_sb = cgroup_kill_sb, 2246 .fs_flags = FS_USERNS_MOUNT, 2247 }; 2248 2249 static struct file_system_type cgroup2_fs_type = { 2250 .name = "cgroup2", 2251 .init_fs_context = cgroup_init_fs_context, 2252 .parameters = &cgroup2_fs_parameters, 2253 .kill_sb = cgroup_kill_sb, 2254 .fs_flags = FS_USERNS_MOUNT, 2255 }; 2256 2257 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, 2258 struct cgroup_namespace *ns) 2259 { 2260 struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root); 2261 2262 return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen); 2263 } 2264 2265 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, 2266 struct cgroup_namespace *ns) 2267 { 2268 int ret; 2269 2270 mutex_lock(&cgroup_mutex); 2271 spin_lock_irq(&css_set_lock); 2272 2273 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns); 2274 2275 spin_unlock_irq(&css_set_lock); 2276 mutex_unlock(&cgroup_mutex); 2277 2278 return ret; 2279 } 2280 EXPORT_SYMBOL_GPL(cgroup_path_ns); 2281 2282 /** 2283 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy 2284 * @task: target task 2285 * @buf: the buffer to write the path into 2286 * @buflen: the length of the buffer 2287 * 2288 * Determine @task's cgroup on the first (the one with the lowest non-zero 2289 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This 2290 * function grabs cgroup_mutex and shouldn't be used inside locks used by 2291 * cgroup controller callbacks. 2292 * 2293 * Return value is the same as kernfs_path(). 2294 */ 2295 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) 2296 { 2297 struct cgroup_root *root; 2298 struct cgroup *cgrp; 2299 int hierarchy_id = 1; 2300 int ret; 2301 2302 mutex_lock(&cgroup_mutex); 2303 spin_lock_irq(&css_set_lock); 2304 2305 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); 2306 2307 if (root) { 2308 cgrp = task_cgroup_from_root(task, root); 2309 ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns); 2310 } else { 2311 /* if no hierarchy exists, everyone is in "/" */ 2312 ret = strlcpy(buf, "/", buflen); 2313 } 2314 2315 spin_unlock_irq(&css_set_lock); 2316 mutex_unlock(&cgroup_mutex); 2317 return ret; 2318 } 2319 EXPORT_SYMBOL_GPL(task_cgroup_path); 2320 2321 /** 2322 * cgroup_migrate_add_task - add a migration target task to a migration context 2323 * @task: target task 2324 * @mgctx: target migration context 2325 * 2326 * Add @task, which is a migration target, to @mgctx->tset. This function 2327 * becomes a noop if @task doesn't need to be migrated. @task's css_set 2328 * should have been added as a migration source and @task->cg_list will be 2329 * moved from the css_set's tasks list to its mg_tasks list.
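* The task's source css_set and its destination css_set are also queued on @mgctx->tset (src_csets and dst_csets respectively) so that cgroup_migrate_execute() can walk every affected cset.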
2330 */ 2331 static void cgroup_migrate_add_task(struct task_struct *task, 2332 struct cgroup_mgctx *mgctx) 2333 { 2334 struct css_set *cset; 2335 2336 lockdep_assert_held(&css_set_lock); 2337 2338 /* @task either already exited or can't exit until the end */ 2339 if (task->flags & PF_EXITING) 2340 return; 2341 2342 /* leave @task alone if post_fork() hasn't linked it yet */ 2343 if (list_empty(&task->cg_list)) 2344 return; 2345 2346 cset = task_css_set(task); 2347 if (!cset->mg_src_cgrp) 2348 return; 2349 2350 mgctx->tset.nr_tasks++; 2351 2352 list_move_tail(&task->cg_list, &cset->mg_tasks); 2353 if (list_empty(&cset->mg_node)) 2354 list_add_tail(&cset->mg_node, 2355 &mgctx->tset.src_csets); 2356 if (list_empty(&cset->mg_dst_cset->mg_node)) 2357 list_add_tail(&cset->mg_dst_cset->mg_node, 2358 &mgctx->tset.dst_csets); 2359 } 2360 2361 /** 2362 * cgroup_taskset_first - reset taskset and return the first task 2363 * @tset: taskset of interest 2364 * @dst_cssp: output variable for the destination css 2365 * 2366 * @tset iteration is initialized and the first task is returned. 2367 */ 2368 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, 2369 struct cgroup_subsys_state **dst_cssp) 2370 { 2371 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); 2372 tset->cur_task = NULL; 2373 2374 return cgroup_taskset_next(tset, dst_cssp); 2375 } 2376 2377 /** 2378 * cgroup_taskset_next - iterate to the next task in taskset 2379 * @tset: taskset of interest 2380 * @dst_cssp: output variable for the destination css 2381 * 2382 * Return the next task in @tset. Iteration must have been initialized 2383 * with cgroup_taskset_first(). 2384 */ 2385 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, 2386 struct cgroup_subsys_state **dst_cssp) 2387 { 2388 struct css_set *cset = tset->cur_cset; 2389 struct task_struct *task = tset->cur_task; 2390 2391 while (&cset->mg_node != tset->csets) { 2392 if (!task) 2393 task = list_first_entry(&cset->mg_tasks, 2394 struct task_struct, cg_list); 2395 else 2396 task = list_next_entry(task, cg_list); 2397 2398 if (&task->cg_list != &cset->mg_tasks) { 2399 tset->cur_cset = cset; 2400 tset->cur_task = task; 2401 2402 /* 2403 * This function may be called both before and 2404 * after cgroup_migrate_execute(). The two cases 2405 * can be distinguished by looking at whether @cset 2406 * has its ->mg_dst_cset set. 2407 */ 2408 if (cset->mg_dst_cset) 2409 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid]; 2410 else 2411 *dst_cssp = cset->subsys[tset->ssid]; 2412 2413 return task; 2414 } 2415 2416 cset = list_next_entry(cset, mg_node); 2417 task = NULL; 2418 } 2419 2420 return NULL; 2421 } 2422 2423 /** 2424 * cgroup_migrate_execute - migrate a taskset 2425 * @mgctx: migration context 2426 * 2427 * Migrate tasks in @mgctx as set up by the migration preparation functions. 2428 * This function fails iff one of the ->can_attach callbacks fails and 2429 * guarantees that either all or none of the tasks in @mgctx are migrated. 2430 * @mgctx is consumed regardless of success.
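* * The callback ordering, sketched for each subsystem in @mgctx->ss_mask: * * ss->can_attach(tset) - any failure aborts the whole migration * (tasks are then moved to their destination css_sets under css_set_lock) * ss->attach(tset) - commit notification, not allowed to fail * * with ss->cancel_attach() invoked for the already-vetted subsystems when a later ->can_attach() fails.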
2431 */ 2432 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) 2433 { 2434 struct cgroup_taskset *tset = &mgctx->tset; 2435 struct cgroup_subsys *ss; 2436 struct task_struct *task, *tmp_task; 2437 struct css_set *cset, *tmp_cset; 2438 int ssid, failed_ssid, ret; 2439 2440 /* check that we can legitimately attach to the cgroup */ 2441 if (tset->nr_tasks) { 2442 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2443 if (ss->can_attach) { 2444 tset->ssid = ssid; 2445 ret = ss->can_attach(tset); 2446 if (ret) { 2447 failed_ssid = ssid; 2448 goto out_cancel_attach; 2449 } 2450 } 2451 } while_each_subsys_mask(); 2452 } 2453 2454 /* 2455 * Now that we're guaranteed success, proceed to move all tasks to 2456 * the new cgroup. There are no failure cases after here, so this 2457 * is the commit point. 2458 */ 2459 spin_lock_irq(&css_set_lock); 2460 list_for_each_entry(cset, &tset->src_csets, mg_node) { 2461 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) { 2462 struct css_set *from_cset = task_css_set(task); 2463 struct css_set *to_cset = cset->mg_dst_cset; 2464 2465 get_css_set(to_cset); 2466 to_cset->nr_tasks++; 2467 css_set_move_task(task, from_cset, to_cset, true); 2468 from_cset->nr_tasks--; 2469 /* 2470 * If the source or destination cgroup is frozen, 2471 * the task might require to change its state. 2472 */ 2473 cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp, 2474 to_cset->dfl_cgrp); 2475 put_css_set_locked(from_cset); 2476 2477 } 2478 } 2479 spin_unlock_irq(&css_set_lock); 2480 2481 /* 2482 * Migration is committed, all target tasks are now on dst_csets. 2483 * Nothing is sensitive to fork() after this point. Notify 2484 * controllers that migration is complete. 2485 */ 2486 tset->csets = &tset->dst_csets; 2487 2488 if (tset->nr_tasks) { 2489 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2490 if (ss->attach) { 2491 tset->ssid = ssid; 2492 ss->attach(tset); 2493 } 2494 } while_each_subsys_mask(); 2495 } 2496 2497 ret = 0; 2498 goto out_release_tset; 2499 2500 out_cancel_attach: 2501 if (tset->nr_tasks) { 2502 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2503 if (ssid == failed_ssid) 2504 break; 2505 if (ss->cancel_attach) { 2506 tset->ssid = ssid; 2507 ss->cancel_attach(tset); 2508 } 2509 } while_each_subsys_mask(); 2510 } 2511 out_release_tset: 2512 spin_lock_irq(&css_set_lock); 2513 list_splice_init(&tset->dst_csets, &tset->src_csets); 2514 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) { 2515 list_splice_tail_init(&cset->mg_tasks, &cset->tasks); 2516 list_del_init(&cset->mg_node); 2517 } 2518 spin_unlock_irq(&css_set_lock); 2519 2520 /* 2521 * Re-initialize the cgroup_taskset structure in case it is reused 2522 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() 2523 * iteration. 2524 */ 2525 tset->nr_tasks = 0; 2526 tset->csets = &tset->src_csets; 2527 return ret; 2528 } 2529 2530 /** 2531 * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination 2532 * @dst_cgrp: destination cgroup to test 2533 * 2534 * On the default hierarchy, except for the mixable, (possible) thread root 2535 * and threaded cgroups, subtree_control must be zero for migration 2536 * destination cgroups with tasks so that child cgroups don't compete 2537 * against tasks. 
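* * For example, moving a task into a domain cgroup which already has a controller enabled in its cgroup.subtree_control fails with -EBUSY.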
2538 */ 2539 int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp) 2540 { 2541 /* v1 doesn't have any restriction */ 2542 if (!cgroup_on_dfl(dst_cgrp)) 2543 return 0; 2544 2545 /* verify @dst_cgrp can host resources */ 2546 if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp)) 2547 return -EOPNOTSUPP; 2548 2549 /* mixables don't care */ 2550 if (cgroup_is_mixable(dst_cgrp)) 2551 return 0; 2552 2553 /* 2554 * If @dst_cgrp is already or can become a thread root or is 2555 * threaded, it doesn't matter. 2556 */ 2557 if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp)) 2558 return 0; 2559 2560 /* apply no-internal-process constraint */ 2561 if (dst_cgrp->subtree_control) 2562 return -EBUSY; 2563 2564 return 0; 2565 } 2566 2567 /** 2568 * cgroup_migrate_finish - cleanup after attach 2569 * @mgctx: migration context 2570 * 2571 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See 2572 * those functions for details. 2573 */ 2574 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) 2575 { 2576 LIST_HEAD(preloaded); 2577 struct css_set *cset, *tmp_cset; 2578 2579 lockdep_assert_held(&cgroup_mutex); 2580 2581 spin_lock_irq(&css_set_lock); 2582 2583 list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded); 2584 list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded); 2585 2586 list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) { 2587 cset->mg_src_cgrp = NULL; 2588 cset->mg_dst_cgrp = NULL; 2589 cset->mg_dst_cset = NULL; 2590 list_del_init(&cset->mg_preload_node); 2591 put_css_set_locked(cset); 2592 } 2593 2594 spin_unlock_irq(&css_set_lock); 2595 } 2596 2597 /** 2598 * cgroup_migrate_add_src - add a migration source css_set 2599 * @src_cset: the source css_set to add 2600 * @dst_cgrp: the destination cgroup 2601 * @mgctx: migration context 2602 * 2603 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin 2604 * @src_cset and add it to @mgctx->preloaded_src_csets, which should later be 2605 * cleaned up by cgroup_migrate_finish(). 2606 * 2607 * This function may be called without holding cgroup_threadgroup_rwsem 2608 * even if the target is a process. Threads may be created and destroyed 2609 * but as long as cgroup_mutex is not dropped, no new css_set can be put 2610 * into play and the preloaded css_sets are guaranteed to cover all 2611 * migrations. 2612 */ 2613 void cgroup_migrate_add_src(struct css_set *src_cset, 2614 struct cgroup *dst_cgrp, 2615 struct cgroup_mgctx *mgctx) 2616 { 2617 struct cgroup *src_cgrp; 2618 2619 lockdep_assert_held(&cgroup_mutex); 2620 lockdep_assert_held(&css_set_lock); 2621 2622 /* 2623 * If ->dead, @src_cset is associated with one or more dead cgroups 2624 * and doesn't contain any migratable tasks. Ignore it early so 2625 * that the rest of the migration path doesn't get confused by it.
2626 */ 2627 if (src_cset->dead) 2628 return; 2629 2630 src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); 2631 2632 if (!list_empty(&src_cset->mg_preload_node)) 2633 return; 2634 2635 WARN_ON(src_cset->mg_src_cgrp); 2636 WARN_ON(src_cset->mg_dst_cgrp); 2637 WARN_ON(!list_empty(&src_cset->mg_tasks)); 2638 WARN_ON(!list_empty(&src_cset->mg_node)); 2639 2640 src_cset->mg_src_cgrp = src_cgrp; 2641 src_cset->mg_dst_cgrp = dst_cgrp; 2642 get_css_set(src_cset); 2643 list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets); 2644 } 2645 2646 /** 2647 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration 2648 * @mgctx: migration context 2649 * 2650 * Tasks are about to be moved and all the source css_sets have been 2651 * preloaded to @mgctx->preloaded_src_csets. This function looks up and 2652 * pins all destination css_sets, links each to its source, and appends them 2653 * to @mgctx->preloaded_dst_csets. 2654 * 2655 * This function must be called after cgroup_migrate_add_src() has been 2656 * called on each migration source css_set. After migration is performed 2657 * using cgroup_migrate(), cgroup_migrate_finish() must be called on 2658 * @mgctx. 2659 */ 2660 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) 2661 { 2662 struct css_set *src_cset, *tmp_cset; 2663 2664 lockdep_assert_held(&cgroup_mutex); 2665 2666 /* look up the dst cset for each src cset and link it to src */ 2667 list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, 2668 mg_preload_node) { 2669 struct css_set *dst_cset; 2670 struct cgroup_subsys *ss; 2671 int ssid; 2672 2673 dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); 2674 if (!dst_cset) 2675 return -ENOMEM; 2676 2677 WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); 2678 2679 /* 2680 * If src cset equals dst, it's a noop. Drop the src. 2681 * cgroup_migrate() will skip the cset too. Note that we 2682 * can't handle src == dst as some nodes are used by both. 2683 */ 2684 if (src_cset == dst_cset) { 2685 src_cset->mg_src_cgrp = NULL; 2686 src_cset->mg_dst_cgrp = NULL; 2687 list_del_init(&src_cset->mg_preload_node); 2688 put_css_set(src_cset); 2689 put_css_set(dst_cset); 2690 continue; 2691 } 2692 2693 src_cset->mg_dst_cset = dst_cset; 2694 2695 if (list_empty(&dst_cset->mg_preload_node)) 2696 list_add_tail(&dst_cset->mg_preload_node, 2697 &mgctx->preloaded_dst_csets); 2698 else 2699 put_css_set(dst_cset); 2700 2701 for_each_subsys(ss, ssid) 2702 if (src_cset->subsys[ssid] != dst_cset->subsys[ssid]) 2703 mgctx->ss_mask |= 1 << ssid; 2704 } 2705 2706 return 0; 2707 } 2708 2709 /** 2710 * cgroup_migrate - migrate a process or task to a cgroup 2711 * @leader: the leader of the process or the task to migrate 2712 * @threadgroup: whether @leader points to the whole process or a single task 2713 * @mgctx: migration context 2714 * 2715 * Migrate a process or task denoted by @leader. If migrating a process, 2716 * the caller must be holding cgroup_threadgroup_rwsem. The caller is also 2717 * responsible for invoking cgroup_migrate_add_src() and 2718 * cgroup_migrate_prepare_dst() on the targets before invoking this 2719 * function and following up with cgroup_migrate_finish(). 2720 * 2721 * As long as a controller's ->can_attach() doesn't fail, this function is 2722 * guaranteed to succeed.
This means that, excluding ->can_attach() 2723 * failure, when migrating multiple targets, the success or failure can be 2724 * decided for all targets by invoking cgroup_migrate_prepare_dst() before 2725 * actually starting the migration. 2726 */ 2727 int cgroup_migrate(struct task_struct *leader, bool threadgroup, 2728 struct cgroup_mgctx *mgctx) 2729 { 2730 struct task_struct *task; 2731 2732 /* 2733 * Prevent freeing of tasks while we take a snapshot. Tasks that are 2734 * already PF_EXITING could be freed from underneath us unless we 2735 * take an rcu_read_lock. 2736 */ 2737 spin_lock_irq(&css_set_lock); 2738 rcu_read_lock(); 2739 task = leader; 2740 do { 2741 cgroup_migrate_add_task(task, mgctx); 2742 if (!threadgroup) 2743 break; 2744 } while_each_thread(leader, task); 2745 rcu_read_unlock(); 2746 spin_unlock_irq(&css_set_lock); 2747 2748 return cgroup_migrate_execute(mgctx); 2749 } 2750 2751 /** 2752 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup 2753 * @dst_cgrp: the cgroup to attach to 2754 * @leader: the task or the leader of the threadgroup to be attached 2755 * @threadgroup: attach the whole threadgroup? 2756 * 2757 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. 2758 */ 2759 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, 2760 bool threadgroup) 2761 { 2762 DEFINE_CGROUP_MGCTX(mgctx); 2763 struct task_struct *task; 2764 int ret; 2765 2766 ret = cgroup_migrate_vet_dst(dst_cgrp); 2767 if (ret) 2768 return ret; 2769 2770 /* look up all src csets */ 2771 spin_lock_irq(&css_set_lock); 2772 rcu_read_lock(); 2773 task = leader; 2774 do { 2775 cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx); 2776 if (!threadgroup) 2777 break; 2778 } while_each_thread(leader, task); 2779 rcu_read_unlock(); 2780 spin_unlock_irq(&css_set_lock); 2781 2782 /* prepare dst csets and commit */ 2783 ret = cgroup_migrate_prepare_dst(&mgctx); 2784 if (!ret) 2785 ret = cgroup_migrate(leader, threadgroup, &mgctx); 2786 2787 cgroup_migrate_finish(&mgctx); 2788 2789 if (!ret) 2790 TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup); 2791 2792 return ret; 2793 } 2794 2795 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup) 2796 __acquires(&cgroup_threadgroup_rwsem) 2797 { 2798 struct task_struct *tsk; 2799 pid_t pid; 2800 2801 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 2802 return ERR_PTR(-EINVAL); 2803 2804 percpu_down_write(&cgroup_threadgroup_rwsem); 2805 2806 rcu_read_lock(); 2807 if (pid) { 2808 tsk = find_task_by_vpid(pid); 2809 if (!tsk) { 2810 tsk = ERR_PTR(-ESRCH); 2811 goto out_unlock_threadgroup; 2812 } 2813 } else { 2814 tsk = current; 2815 } 2816 2817 if (threadgroup) 2818 tsk = tsk->group_leader; 2819 2820 /* 2821 * kthreads may acquire PF_NO_SETAFFINITY during initialization. 2822 * If userland migrates such a kthread to a non-root cgroup, it can 2823 * become trapped in a cpuset, or an RT kthread may be born in a 2824 * cgroup with no rt_runtime allocated. Just say no.
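* (e.g. writing a kworker's PID into a cgroup.procs file fails with -EINVAL).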
2825 */ 2826 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { 2827 tsk = ERR_PTR(-EINVAL); 2828 goto out_unlock_threadgroup; 2829 } 2830 2831 get_task_struct(tsk); 2832 goto out_unlock_rcu; 2833 2834 out_unlock_threadgroup: 2835 percpu_up_write(&cgroup_threadgroup_rwsem); 2836 out_unlock_rcu: 2837 rcu_read_unlock(); 2838 return tsk; 2839 } 2840 2841 void cgroup_procs_write_finish(struct task_struct *task) 2842 __releases(&cgroup_threadgroup_rwsem) 2843 { 2844 struct cgroup_subsys *ss; 2845 int ssid; 2846 2847 /* release reference from cgroup_procs_write_start() */ 2848 put_task_struct(task); 2849 2850 percpu_up_write(&cgroup_threadgroup_rwsem); 2851 for_each_subsys(ss, ssid) 2852 if (ss->post_attach) 2853 ss->post_attach(); 2854 } 2855 2856 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask) 2857 { 2858 struct cgroup_subsys *ss; 2859 bool printed = false; 2860 int ssid; 2861 2862 do_each_subsys_mask(ss, ssid, ss_mask) { 2863 if (printed) 2864 seq_putc(seq, ' '); 2865 seq_printf(seq, "%s", ss->name); 2866 printed = true; 2867 } while_each_subsys_mask(); 2868 if (printed) 2869 seq_putc(seq, '\n'); 2870 } 2871 2872 /* show controllers which are enabled from the parent */ 2873 static int cgroup_controllers_show(struct seq_file *seq, void *v) 2874 { 2875 struct cgroup *cgrp = seq_css(seq)->cgroup; 2876 2877 cgroup_print_ss_mask(seq, cgroup_control(cgrp)); 2878 return 0; 2879 } 2880 2881 /* show controllers which are enabled for a given cgroup's children */ 2882 static int cgroup_subtree_control_show(struct seq_file *seq, void *v) 2883 { 2884 struct cgroup *cgrp = seq_css(seq)->cgroup; 2885 2886 cgroup_print_ss_mask(seq, cgrp->subtree_control); 2887 return 0; 2888 } 2889 2890 /** 2891 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy 2892 * @cgrp: root of the subtree to update csses for 2893 * 2894 * @cgrp's control masks have changed and its subtree's css associations 2895 * need to be updated accordingly. This function looks up all css_sets 2896 * which are attached to the subtree, creates the matching updated css_sets 2897 * and migrates the tasks to the new ones. 
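* * It follows the usual migration sequence - cgroup_migrate_add_src() for every css_set attached to the subtree, cgroup_migrate_prepare_dst(), cgroup_migrate_execute() and cgroup_migrate_finish() - with cgroup_threadgroup_rwsem held for write so that no task can fork or exit while the css associations are being rewritten.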
2898 */ 2899 static int cgroup_update_dfl_csses(struct cgroup *cgrp) 2900 { 2901 DEFINE_CGROUP_MGCTX(mgctx); 2902 struct cgroup_subsys_state *d_css; 2903 struct cgroup *dsct; 2904 struct css_set *src_cset; 2905 int ret; 2906 2907 lockdep_assert_held(&cgroup_mutex); 2908 2909 percpu_down_write(&cgroup_threadgroup_rwsem); 2910 2911 /* look up all csses currently attached to @cgrp's subtree */ 2912 spin_lock_irq(&css_set_lock); 2913 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2914 struct cgrp_cset_link *link; 2915 2916 list_for_each_entry(link, &dsct->cset_links, cset_link) 2917 cgroup_migrate_add_src(link->cset, dsct, &mgctx); 2918 } 2919 spin_unlock_irq(&css_set_lock); 2920 2921 /* NULL dst indicates self on default hierarchy */ 2922 ret = cgroup_migrate_prepare_dst(&mgctx); 2923 if (ret) 2924 goto out_finish; 2925 2926 spin_lock_irq(&css_set_lock); 2927 list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) { 2928 struct task_struct *task, *ntask; 2929 2930 /* all tasks in src_csets need to be migrated */ 2931 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) 2932 cgroup_migrate_add_task(task, &mgctx); 2933 } 2934 spin_unlock_irq(&css_set_lock); 2935 2936 ret = cgroup_migrate_execute(&mgctx); 2937 out_finish: 2938 cgroup_migrate_finish(&mgctx); 2939 percpu_up_write(&cgroup_threadgroup_rwsem); 2940 return ret; 2941 } 2942 2943 /** 2944 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses 2945 * @cgrp: root of the target subtree 2946 * 2947 * Because css offlining is asynchronous, userland may try to re-enable a 2948 * controller while the previous css is still around. This function grabs 2949 * cgroup_mutex and drains the previous css instances of @cgrp's subtree. 2950 */ 2951 void cgroup_lock_and_drain_offline(struct cgroup *cgrp) 2952 __acquires(&cgroup_mutex) 2953 { 2954 struct cgroup *dsct; 2955 struct cgroup_subsys_state *d_css; 2956 struct cgroup_subsys *ss; 2957 int ssid; 2958 2959 restart: 2960 mutex_lock(&cgroup_mutex); 2961 2962 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 2963 for_each_subsys(ss, ssid) { 2964 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 2965 DEFINE_WAIT(wait); 2966 2967 if (!css || !percpu_ref_is_dying(&css->refcnt)) 2968 continue; 2969 2970 cgroup_get_live(dsct); 2971 prepare_to_wait(&dsct->offline_waitq, &wait, 2972 TASK_UNINTERRUPTIBLE); 2973 2974 mutex_unlock(&cgroup_mutex); 2975 schedule(); 2976 finish_wait(&dsct->offline_waitq, &wait); 2977 2978 cgroup_put(dsct); 2979 goto restart; 2980 } 2981 } 2982 } 2983 2984 /** 2985 * cgroup_save_control - save control masks and dom_cgrp of a subtree 2986 * @cgrp: root of the target subtree 2987 * 2988 * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the 2989 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 2990 * itself. 
2991 */ 2992 static void cgroup_save_control(struct cgroup *cgrp) 2993 { 2994 struct cgroup *dsct; 2995 struct cgroup_subsys_state *d_css; 2996 2997 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2998 dsct->old_subtree_control = dsct->subtree_control; 2999 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; 3000 dsct->old_dom_cgrp = dsct->dom_cgrp; 3001 } 3002 } 3003 3004 /** 3005 * cgroup_propagate_control - refresh control masks of a subtree 3006 * @cgrp: root of the target subtree 3007 * 3008 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches 3009 * ->subtree_control and propagate controller availability through the 3010 * subtree so that descendants don't have unavailable controllers enabled. 3011 */ 3012 static void cgroup_propagate_control(struct cgroup *cgrp) 3013 { 3014 struct cgroup *dsct; 3015 struct cgroup_subsys_state *d_css; 3016 3017 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3018 dsct->subtree_control &= cgroup_control(dsct); 3019 dsct->subtree_ss_mask = 3020 cgroup_calc_subtree_ss_mask(dsct->subtree_control, 3021 cgroup_ss_mask(dsct)); 3022 } 3023 } 3024 3025 /** 3026 * cgroup_restore_control - restore control masks and dom_cgrp of a subtree 3027 * @cgrp: root of the target subtree 3028 * 3029 * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the 3030 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 3031 * itself. 3032 */ 3033 static void cgroup_restore_control(struct cgroup *cgrp) 3034 { 3035 struct cgroup *dsct; 3036 struct cgroup_subsys_state *d_css; 3037 3038 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 3039 dsct->subtree_control = dsct->old_subtree_control; 3040 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; 3041 dsct->dom_cgrp = dsct->old_dom_cgrp; 3042 } 3043 } 3044 3045 static bool css_visible(struct cgroup_subsys_state *css) 3046 { 3047 struct cgroup_subsys *ss = css->ss; 3048 struct cgroup *cgrp = css->cgroup; 3049 3050 if (cgroup_control(cgrp) & (1 << ss->id)) 3051 return true; 3052 if (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) 3053 return false; 3054 return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl; 3055 } 3056 3057 /** 3058 * cgroup_apply_control_enable - enable or show csses according to control 3059 * @cgrp: root of the target subtree 3060 * 3061 * Walk @cgrp's subtree and create new csses or make the existing ones 3062 * visible. A css is created invisible if it's being implicitly enabled 3063 * through dependency. An invisible css is made visible when the userland 3064 * explicitly enables it. 3065 * 3066 * Returns 0 on success, -errno on failure. On failure, csses which have 3067 * been processed already aren't cleaned up. The caller is responsible for 3068 * cleaning up with cgroup_apply_control_disable(). 
3069 */ 3070 static int cgroup_apply_control_enable(struct cgroup *cgrp) 3071 { 3072 struct cgroup *dsct; 3073 struct cgroup_subsys_state *d_css; 3074 struct cgroup_subsys *ss; 3075 int ssid, ret; 3076 3077 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3078 for_each_subsys(ss, ssid) { 3079 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 3080 3081 WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt)); 3082 3083 if (!(cgroup_ss_mask(dsct) & (1 << ss->id))) 3084 continue; 3085 3086 if (!css) { 3087 css = css_create(dsct, ss); 3088 if (IS_ERR(css)) 3089 return PTR_ERR(css); 3090 } 3091 3092 if (css_visible(css)) { 3093 ret = css_populate_dir(css); 3094 if (ret) 3095 return ret; 3096 } 3097 } 3098 } 3099 3100 return 0; 3101 } 3102 3103 /** 3104 * cgroup_apply_control_disable - kill or hide csses according to control 3105 * @cgrp: root of the target subtree 3106 * 3107 * Walk @cgrp's subtree and kill and hide csses so that they match 3108 * cgroup_ss_mask() and css_visible(). 3109 * 3110 * A css is hidden when the userland requests it to be disabled while other 3111 * subsystems are still depending on it. The css must not actively control 3112 * resources and must be in the vanilla state if it's made visible again later. 3113 * Controllers which may be depended upon should provide ->css_reset() for 3114 * this purpose. 3115 */ 3116 static void cgroup_apply_control_disable(struct cgroup *cgrp) 3117 { 3118 struct cgroup *dsct; 3119 struct cgroup_subsys_state *d_css; 3120 struct cgroup_subsys *ss; 3121 int ssid; 3122 3123 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 3124 for_each_subsys(ss, ssid) { 3125 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 3126 3127 WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt)); 3128 3129 if (!css) 3130 continue; 3131 3132 if (css->parent && 3133 !(cgroup_ss_mask(dsct) & (1 << ss->id))) { 3134 kill_css(css); 3135 } else if (!css_visible(css)) { 3136 css_clear_dir(css); 3137 if (ss->css_reset) 3138 ss->css_reset(css); 3139 } 3140 } 3141 } 3142 } 3143 3144 /** 3145 * cgroup_apply_control - apply control mask updates to the subtree 3146 * @cgrp: root of the target subtree 3147 * 3148 * Subsystems can be enabled and disabled in a subtree using the following 3149 * steps. 3150 * 3151 * 1. Call cgroup_save_control() to stash the current state. 3152 * 2. Update ->subtree_control masks in the subtree as desired. 3153 * 3. Call cgroup_apply_control() to apply the changes. 3154 * 4. Optionally perform other related operations. 3155 * 5. Call cgroup_finalize_control() to finish up. 3156 * 3157 * This function implements step 3 and propagates the mask changes 3158 * throughout @cgrp's subtree, updates csses accordingly and performs 3159 * process migrations. 3160 */ 3161 static int cgroup_apply_control(struct cgroup *cgrp) 3162 { 3163 int ret; 3164 3165 cgroup_propagate_control(cgrp); 3166 3167 ret = cgroup_apply_control_enable(cgrp); 3168 if (ret) 3169 return ret; 3170 3171 /* 3172 * At this point, cgroup_e_css_by_mask() results reflect the new csses 3173 * making the following cgroup_update_dfl_csses() properly update 3174 * css associations of all tasks in the subtree. 3175 */ 3176 ret = cgroup_update_dfl_csses(cgrp); 3177 if (ret) 3178 return ret; 3179 3180 return 0; 3181 } 3182 3183 /** 3184 * cgroup_finalize_control - finalize control mask update 3185 * @cgrp: root of the target subtree 3186 * @ret: the result of the update 3187 * 3188 * Finalize control mask update. See cgroup_apply_control() for more info.
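* * The whole protocol as used by cgroup_subtree_control_write(), sketched with error handling elided: * * cgroup_save_control(cgrp); * cgrp->subtree_control |= enable; * cgrp->subtree_control &= ~disable; * ret = cgroup_apply_control(cgrp); * cgroup_finalize_control(cgrp, ret); * kernfs_activate(cgrp->kn);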
3189 */ 3190 static void cgroup_finalize_control(struct cgroup *cgrp, int ret) 3191 { 3192 if (ret) { 3193 cgroup_restore_control(cgrp); 3194 cgroup_propagate_control(cgrp); 3195 } 3196 3197 cgroup_apply_control_disable(cgrp); 3198 } 3199 3200 static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable) 3201 { 3202 u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask; 3203 3204 /* if nothing is getting enabled, nothing to worry about */ 3205 if (!enable) 3206 return 0; 3207 3208 /* can @cgrp host any resources? */ 3209 if (!cgroup_is_valid_domain(cgrp->dom_cgrp)) 3210 return -EOPNOTSUPP; 3211 3212 /* mixables don't care */ 3213 if (cgroup_is_mixable(cgrp)) 3214 return 0; 3215 3216 if (domain_enable) { 3217 /* can't enable domain controllers inside a thread subtree */ 3218 if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3219 return -EOPNOTSUPP; 3220 } else { 3221 /* 3222 * Threaded controllers can handle internal competitions 3223 * and are always allowed inside a (prospective) thread 3224 * subtree. 3225 */ 3226 if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3227 return 0; 3228 } 3229 3230 /* 3231 * Controllers can't be enabled for a cgroup with tasks to avoid 3232 * child cgroups competing against tasks. 3233 */ 3234 if (cgroup_has_tasks(cgrp)) 3235 return -EBUSY; 3236 3237 return 0; 3238 } 3239 3240 /* change the enabled child controllers for a cgroup in the default hierarchy */ 3241 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, 3242 char *buf, size_t nbytes, 3243 loff_t off) 3244 { 3245 u16 enable = 0, disable = 0; 3246 struct cgroup *cgrp, *child; 3247 struct cgroup_subsys *ss; 3248 char *tok; 3249 int ssid, ret; 3250 3251 /* 3252 * Parse input - space separated list of subsystem names prefixed 3253 * with either + or -. 3254 */ 3255 buf = strstrip(buf); 3256 while ((tok = strsep(&buf, " "))) { 3257 if (tok[0] == '\0') 3258 continue; 3259 do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) { 3260 if (!cgroup_ssid_enabled(ssid) || 3261 strcmp(tok + 1, ss->name)) 3262 continue; 3263 3264 if (*tok == '+') { 3265 enable |= 1 << ssid; 3266 disable &= ~(1 << ssid); 3267 } else if (*tok == '-') { 3268 disable |= 1 << ssid; 3269 enable &= ~(1 << ssid); 3270 } else { 3271 return -EINVAL; 3272 } 3273 break; 3274 } while_each_subsys_mask(); 3275 if (ssid == CGROUP_SUBSYS_COUNT) 3276 return -EINVAL; 3277 } 3278 3279 cgrp = cgroup_kn_lock_live(of->kn, true); 3280 if (!cgrp) 3281 return -ENODEV; 3282 3283 for_each_subsys(ss, ssid) { 3284 if (enable & (1 << ssid)) { 3285 if (cgrp->subtree_control & (1 << ssid)) { 3286 enable &= ~(1 << ssid); 3287 continue; 3288 } 3289 3290 if (!(cgroup_control(cgrp) & (1 << ssid))) { 3291 ret = -ENOENT; 3292 goto out_unlock; 3293 } 3294 } else if (disable & (1 << ssid)) { 3295 if (!(cgrp->subtree_control & (1 << ssid))) { 3296 disable &= ~(1 << ssid); 3297 continue; 3298 } 3299 3300 /* a child has it enabled? 
*/ 3301 cgroup_for_each_live_child(child, cgrp) { 3302 if (child->subtree_control & (1 << ssid)) { 3303 ret = -EBUSY; 3304 goto out_unlock; 3305 } 3306 } 3307 } 3308 } 3309 3310 if (!enable && !disable) { 3311 ret = 0; 3312 goto out_unlock; 3313 } 3314 3315 ret = cgroup_vet_subtree_control_enable(cgrp, enable); 3316 if (ret) 3317 goto out_unlock; 3318 3319 /* save and update control masks and prepare csses */ 3320 cgroup_save_control(cgrp); 3321 3322 cgrp->subtree_control |= enable; 3323 cgrp->subtree_control &= ~disable; 3324 3325 ret = cgroup_apply_control(cgrp); 3326 cgroup_finalize_control(cgrp, ret); 3327 if (ret) 3328 goto out_unlock; 3329 3330 kernfs_activate(cgrp->kn); 3331 out_unlock: 3332 cgroup_kn_unlock(of->kn); 3333 return ret ?: nbytes; 3334 } 3335 3336 /** 3337 * cgroup_enable_threaded - make @cgrp threaded 3338 * @cgrp: the target cgroup 3339 * 3340 * Called when "threaded" is written to the cgroup.type interface file and 3341 * tries to make @cgrp threaded and join the parent's resource domain. 3342 * This function is never called on the root cgroup as cgroup.type doesn't 3343 * exist on it. 3344 */ 3345 static int cgroup_enable_threaded(struct cgroup *cgrp) 3346 { 3347 struct cgroup *parent = cgroup_parent(cgrp); 3348 struct cgroup *dom_cgrp = parent->dom_cgrp; 3349 struct cgroup *dsct; 3350 struct cgroup_subsys_state *d_css; 3351 int ret; 3352 3353 lockdep_assert_held(&cgroup_mutex); 3354 3355 /* noop if already threaded */ 3356 if (cgroup_is_threaded(cgrp)) 3357 return 0; 3358 3359 /* 3360 * If @cgroup is populated or has domain controllers enabled, it 3361 * can't be switched. While the below cgroup_can_be_thread_root() 3362 * test can catch the same conditions, that's only when @parent is 3363 * not mixable, so let's check it explicitly. 3364 */ 3365 if (cgroup_is_populated(cgrp) || 3366 cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask) 3367 return -EOPNOTSUPP; 3368 3369 /* we're joining the parent's domain, ensure its validity */ 3370 if (!cgroup_is_valid_domain(dom_cgrp) || 3371 !cgroup_can_be_thread_root(dom_cgrp)) 3372 return -EOPNOTSUPP; 3373 3374 /* 3375 * The following shouldn't cause actual migrations and should 3376 * always succeed. 
3377 */ 3378 cgroup_save_control(cgrp); 3379 3380 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) 3381 if (dsct == cgrp || cgroup_is_threaded(dsct)) 3382 dsct->dom_cgrp = dom_cgrp; 3383 3384 ret = cgroup_apply_control(cgrp); 3385 if (!ret) 3386 parent->nr_threaded_children++; 3387 3388 cgroup_finalize_control(cgrp, ret); 3389 return ret; 3390 } 3391 3392 static int cgroup_type_show(struct seq_file *seq, void *v) 3393 { 3394 struct cgroup *cgrp = seq_css(seq)->cgroup; 3395 3396 if (cgroup_is_threaded(cgrp)) 3397 seq_puts(seq, "threaded\n"); 3398 else if (!cgroup_is_valid_domain(cgrp)) 3399 seq_puts(seq, "domain invalid\n"); 3400 else if (cgroup_is_thread_root(cgrp)) 3401 seq_puts(seq, "domain threaded\n"); 3402 else 3403 seq_puts(seq, "domain\n"); 3404 3405 return 0; 3406 } 3407 3408 static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, 3409 size_t nbytes, loff_t off) 3410 { 3411 struct cgroup *cgrp; 3412 int ret; 3413 3414 /* only switching to threaded mode is supported */ 3415 if (strcmp(strstrip(buf), "threaded")) 3416 return -EINVAL; 3417 3418 cgrp = cgroup_kn_lock_live(of->kn, false); 3419 if (!cgrp) 3420 return -ENOENT; 3421 3422 /* threaded can only be enabled */ 3423 ret = cgroup_enable_threaded(cgrp); 3424 3425 cgroup_kn_unlock(of->kn); 3426 return ret ?: nbytes; 3427 } 3428 3429 static int cgroup_max_descendants_show(struct seq_file *seq, void *v) 3430 { 3431 struct cgroup *cgrp = seq_css(seq)->cgroup; 3432 int descendants = READ_ONCE(cgrp->max_descendants); 3433 3434 if (descendants == INT_MAX) 3435 seq_puts(seq, "max\n"); 3436 else 3437 seq_printf(seq, "%d\n", descendants); 3438 3439 return 0; 3440 } 3441 3442 static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of, 3443 char *buf, size_t nbytes, loff_t off) 3444 { 3445 struct cgroup *cgrp; 3446 int descendants; 3447 ssize_t ret; 3448 3449 buf = strstrip(buf); 3450 if (!strcmp(buf, "max")) { 3451 descendants = INT_MAX; 3452 } else { 3453 ret = kstrtoint(buf, 0, &descendants); 3454 if (ret) 3455 return ret; 3456 } 3457 3458 if (descendants < 0) 3459 return -ERANGE; 3460 3461 cgrp = cgroup_kn_lock_live(of->kn, false); 3462 if (!cgrp) 3463 return -ENOENT; 3464 3465 cgrp->max_descendants = descendants; 3466 3467 cgroup_kn_unlock(of->kn); 3468 3469 return nbytes; 3470 } 3471 3472 static int cgroup_max_depth_show(struct seq_file *seq, void *v) 3473 { 3474 struct cgroup *cgrp = seq_css(seq)->cgroup; 3475 int depth = READ_ONCE(cgrp->max_depth); 3476 3477 if (depth == INT_MAX) 3478 seq_puts(seq, "max\n"); 3479 else 3480 seq_printf(seq, "%d\n", depth); 3481 3482 return 0; 3483 } 3484 3485 static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, 3486 char *buf, size_t nbytes, loff_t off) 3487 { 3488 struct cgroup *cgrp; 3489 ssize_t ret; 3490 int depth; 3491 3492 buf = strstrip(buf); 3493 if (!strcmp(buf, "max")) { 3494 depth = INT_MAX; 3495 } else { 3496 ret = kstrtoint(buf, 0, &depth); 3497 if (ret) 3498 return ret; 3499 } 3500 3501 if (depth < 0) 3502 return -ERANGE; 3503 3504 cgrp = cgroup_kn_lock_live(of->kn, false); 3505 if (!cgrp) 3506 return -ENOENT; 3507 3508 cgrp->max_depth = depth; 3509 3510 cgroup_kn_unlock(of->kn); 3511 3512 return nbytes; 3513 } 3514 3515 static int cgroup_events_show(struct seq_file *seq, void *v) 3516 { 3517 struct cgroup *cgrp = seq_css(seq)->cgroup; 3518 3519 seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp)); 3520 seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags)); 3521 3522 return 0; 3523 } 3524 3525 static int 
cgroup_stat_show(struct seq_file *seq, void *v) 3526 { 3527 struct cgroup *cgroup = seq_css(seq)->cgroup; 3528 3529 seq_printf(seq, "nr_descendants %d\n", 3530 cgroup->nr_descendants); 3531 seq_printf(seq, "nr_dying_descendants %d\n", 3532 cgroup->nr_dying_descendants); 3533 3534 return 0; 3535 } 3536 3537 static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq, 3538 struct cgroup *cgrp, int ssid) 3539 { 3540 struct cgroup_subsys *ss = cgroup_subsys[ssid]; 3541 struct cgroup_subsys_state *css; 3542 int ret; 3543 3544 if (!ss->css_extra_stat_show) 3545 return 0; 3546 3547 css = cgroup_tryget_css(cgrp, ss); 3548 if (!css) 3549 return 0; 3550 3551 ret = ss->css_extra_stat_show(seq, css); 3552 css_put(css); 3553 return ret; 3554 } 3555 3556 static int cpu_stat_show(struct seq_file *seq, void *v) 3557 { 3558 struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup; 3559 int ret = 0; 3560 3561 cgroup_base_stat_cputime_show(seq); 3562 #ifdef CONFIG_CGROUP_SCHED 3563 ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id); 3564 #endif 3565 return ret; 3566 } 3567 3568 #ifdef CONFIG_PSI 3569 static int cgroup_io_pressure_show(struct seq_file *seq, void *v) 3570 { 3571 struct cgroup *cgroup = seq_css(seq)->cgroup; 3572 struct psi_group *psi = cgroup->id == 1 ? &psi_system : &cgroup->psi; 3573 3574 return psi_show(seq, psi, PSI_IO); 3575 } 3576 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v) 3577 { 3578 struct cgroup *cgroup = seq_css(seq)->cgroup; 3579 struct psi_group *psi = cgroup->id == 1 ? &psi_system : &cgroup->psi; 3580 3581 return psi_show(seq, psi, PSI_MEM); 3582 } 3583 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) 3584 { 3585 struct cgroup *cgroup = seq_css(seq)->cgroup; 3586 struct psi_group *psi = cgroup->id == 1 ? 
&psi_system : &cgroup->psi; 3587 3588 return psi_show(seq, psi, PSI_CPU); 3589 } 3590 3591 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, 3592 size_t nbytes, enum psi_res res) 3593 { 3594 struct psi_trigger *new; 3595 struct cgroup *cgrp; 3596 3597 cgrp = cgroup_kn_lock_live(of->kn, false); 3598 if (!cgrp) 3599 return -ENODEV; 3600 3601 cgroup_get(cgrp); 3602 cgroup_kn_unlock(of->kn); 3603 3604 new = psi_trigger_create(&cgrp->psi, buf, nbytes, res); 3605 if (IS_ERR(new)) { 3606 cgroup_put(cgrp); 3607 return PTR_ERR(new); 3608 } 3609 3610 psi_trigger_replace(&of->priv, new); 3611 3612 cgroup_put(cgrp); 3613 3614 return nbytes; 3615 } 3616 3617 static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of, 3618 char *buf, size_t nbytes, 3619 loff_t off) 3620 { 3621 return cgroup_pressure_write(of, buf, nbytes, PSI_IO); 3622 } 3623 3624 static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of, 3625 char *buf, size_t nbytes, 3626 loff_t off) 3627 { 3628 return cgroup_pressure_write(of, buf, nbytes, PSI_MEM); 3629 } 3630 3631 static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of, 3632 char *buf, size_t nbytes, 3633 loff_t off) 3634 { 3635 return cgroup_pressure_write(of, buf, nbytes, PSI_CPU); 3636 } 3637 3638 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, 3639 poll_table *pt) 3640 { 3641 return psi_trigger_poll(&of->priv, of->file, pt); 3642 } 3643 3644 static void cgroup_pressure_release(struct kernfs_open_file *of) 3645 { 3646 psi_trigger_replace(&of->priv, NULL); 3647 } 3648 #endif /* CONFIG_PSI */ 3649 3650 static int cgroup_freeze_show(struct seq_file *seq, void *v) 3651 { 3652 struct cgroup *cgrp = seq_css(seq)->cgroup; 3653 3654 seq_printf(seq, "%d\n", cgrp->freezer.freeze); 3655 3656 return 0; 3657 } 3658 3659 static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, 3660 char *buf, size_t nbytes, loff_t off) 3661 { 3662 struct cgroup *cgrp; 3663 ssize_t ret; 3664 int freeze; 3665 3666 ret = kstrtoint(strstrip(buf), 0, &freeze); 3667 if (ret) 3668 return ret; 3669 3670 if (freeze < 0 || freeze > 1) 3671 return -ERANGE; 3672 3673 cgrp = cgroup_kn_lock_live(of->kn, false); 3674 if (!cgrp) 3675 return -ENOENT; 3676 3677 cgroup_freeze(cgrp, freeze); 3678 3679 cgroup_kn_unlock(of->kn); 3680 3681 return nbytes; 3682 } 3683 3684 static int cgroup_file_open(struct kernfs_open_file *of) 3685 { 3686 struct cftype *cft = of->kn->priv; 3687 3688 if (cft->open) 3689 return cft->open(of); 3690 return 0; 3691 } 3692 3693 static void cgroup_file_release(struct kernfs_open_file *of) 3694 { 3695 struct cftype *cft = of->kn->priv; 3696 3697 if (cft->release) 3698 cft->release(of); 3699 } 3700 3701 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, 3702 size_t nbytes, loff_t off) 3703 { 3704 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; 3705 struct cgroup *cgrp = of->kn->parent->priv; 3706 struct cftype *cft = of->kn->priv; 3707 struct cgroup_subsys_state *css; 3708 int ret; 3709 3710 /* 3711 * If namespaces are delegation boundaries, disallow writes to 3712 * files in a non-init namespace root from inside the namespace 3713 * except for the files explicitly marked delegatable - 3714 * cgroup.procs and cgroup.subtree_control.
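* For example, with nsdelegate a container can't tighten its own root's cgroup.max.depth from inside its namespace, while cgroup.procs, being marked CFTYPE_NS_DELEGATABLE, stays writable.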
3715 */ 3716 if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) && 3717 !(cft->flags & CFTYPE_NS_DELEGATABLE) && 3718 ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp) 3719 return -EPERM; 3720 3721 if (cft->write) 3722 return cft->write(of, buf, nbytes, off); 3723 3724 /* 3725 * kernfs guarantees that a file isn't deleted with operations in 3726 * flight, which means that the matching css is and stays alive and 3727 * doesn't need to be pinned. The RCU locking is not necessary 3728 * either. It's just for the convenience of using cgroup_css(). 3729 */ 3730 rcu_read_lock(); 3731 css = cgroup_css(cgrp, cft->ss); 3732 rcu_read_unlock(); 3733 3734 if (cft->write_u64) { 3735 unsigned long long v; 3736 ret = kstrtoull(buf, 0, &v); 3737 if (!ret) 3738 ret = cft->write_u64(css, cft, v); 3739 } else if (cft->write_s64) { 3740 long long v; 3741 ret = kstrtoll(buf, 0, &v); 3742 if (!ret) 3743 ret = cft->write_s64(css, cft, v); 3744 } else { 3745 ret = -EINVAL; 3746 } 3747 3748 return ret ?: nbytes; 3749 } 3750 3751 static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt) 3752 { 3753 struct cftype *cft = of->kn->priv; 3754 3755 if (cft->poll) 3756 return cft->poll(of, pt); 3757 3758 return kernfs_generic_poll(of, pt); 3759 } 3760 3761 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) 3762 { 3763 return seq_cft(seq)->seq_start(seq, ppos); 3764 } 3765 3766 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) 3767 { 3768 return seq_cft(seq)->seq_next(seq, v, ppos); 3769 } 3770 3771 static void cgroup_seqfile_stop(struct seq_file *seq, void *v) 3772 { 3773 if (seq_cft(seq)->seq_stop) 3774 seq_cft(seq)->seq_stop(seq, v); 3775 } 3776 3777 static int cgroup_seqfile_show(struct seq_file *m, void *arg) 3778 { 3779 struct cftype *cft = seq_cft(m); 3780 struct cgroup_subsys_state *css = seq_css(m); 3781 3782 if (cft->seq_show) 3783 return cft->seq_show(m, arg); 3784 3785 if (cft->read_u64) 3786 seq_printf(m, "%llu\n", cft->read_u64(css, cft)); 3787 else if (cft->read_s64) 3788 seq_printf(m, "%lld\n", cft->read_s64(css, cft)); 3789 else 3790 return -EINVAL; 3791 return 0; 3792 } 3793 3794 static struct kernfs_ops cgroup_kf_single_ops = { 3795 .atomic_write_len = PAGE_SIZE, 3796 .open = cgroup_file_open, 3797 .release = cgroup_file_release, 3798 .write = cgroup_file_write, 3799 .poll = cgroup_file_poll, 3800 .seq_show = cgroup_seqfile_show, 3801 }; 3802 3803 static struct kernfs_ops cgroup_kf_ops = { 3804 .atomic_write_len = PAGE_SIZE, 3805 .open = cgroup_file_open, 3806 .release = cgroup_file_release, 3807 .write = cgroup_file_write, 3808 .poll = cgroup_file_poll, 3809 .seq_start = cgroup_seqfile_start, 3810 .seq_next = cgroup_seqfile_next, 3811 .seq_stop = cgroup_seqfile_stop, 3812 .seq_show = cgroup_seqfile_show, 3813 }; 3814 3815 /* set uid and gid of cgroup dirs and files to that of the creator */ 3816 static int cgroup_kn_set_ugid(struct kernfs_node *kn) 3817 { 3818 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 3819 .ia_uid = current_fsuid(), 3820 .ia_gid = current_fsgid(), }; 3821 3822 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 3823 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 3824 return 0; 3825 3826 return kernfs_setattr(kn, &iattr); 3827 } 3828 3829 static void cgroup_file_notify_timer(struct timer_list *timer) 3830 { 3831 cgroup_file_notify(container_of(timer, struct cgroup_file, 3832 notify_timer)); 3833 } 3834 3835 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, 3836 struct cftype *cft) 3837 { 
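/* Create the kernfs file, transfer ownership to the creating user (see cgroup_kn_set_ugid()), wire up the cftype->file_offset notification handle and, for CFTYPE_SYMLINKED files, add a compatibility symlink. */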
3838 char name[CGROUP_FILE_NAME_MAX]; 3839 struct kernfs_node *kn; 3840 struct kernfs_node *kn_link; 3841 struct lock_class_key *key = NULL; 3842 int ret; 3843 3844 #ifdef CONFIG_DEBUG_LOCK_ALLOC 3845 key = &cft->lockdep_key; 3846 #endif 3847 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), 3848 cgroup_file_mode(cft), 3849 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 3850 0, cft->kf_ops, cft, 3851 NULL, key); 3852 if (IS_ERR(kn)) 3853 return PTR_ERR(kn); 3854 3855 ret = cgroup_kn_set_ugid(kn); 3856 if (ret) { 3857 kernfs_remove(kn); 3858 return ret; 3859 } 3860 3861 if (cft->file_offset) { 3862 struct cgroup_file *cfile = (void *)css + cft->file_offset; 3863 3864 timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0); 3865 3866 spin_lock_irq(&cgroup_file_kn_lock); 3867 cfile->kn = kn; 3868 spin_unlock_irq(&cgroup_file_kn_lock); 3869 } 3870 3871 if (cft->flags & CFTYPE_SYMLINKED) { 3872 kn_link = kernfs_create_link(cgrp->kn, 3873 cgroup_link_name(cgrp, cft, name), 3874 kn); 3875 if (IS_ERR(kn_link)) 3876 return PTR_ERR(kn_link); 3877 } 3878 3879 return 0; 3880 } 3881 3882 /** 3883 * cgroup_addrm_files - add or remove files to a cgroup directory 3884 * @css: the target css 3885 * @cgrp: the target cgroup (usually css->cgroup) 3886 * @cfts: array of cftypes to be added 3887 * @is_add: whether to add or remove 3888 * 3889 * Depending on @is_add, add or remove files defined by @cfts on @cgrp. 3890 * For removals, this function never fails. 3891 */ 3892 static int cgroup_addrm_files(struct cgroup_subsys_state *css, 3893 struct cgroup *cgrp, struct cftype cfts[], 3894 bool is_add) 3895 { 3896 struct cftype *cft, *cft_end = NULL; 3897 int ret = 0; 3898 3899 lockdep_assert_held(&cgroup_mutex); 3900 3901 restart: 3902 for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { 3903 /* does cft->flags tell us to skip this file on @cgrp? 
*/ 3904 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) 3905 continue; 3906 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) 3907 continue; 3908 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) 3909 continue; 3910 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) 3911 continue; 3912 if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug) 3913 continue; 3914 if (is_add) { 3915 ret = cgroup_add_file(css, cgrp, cft); 3916 if (ret) { 3917 pr_warn("%s: failed to add %s, err=%d\n", 3918 __func__, cft->name, ret); 3919 cft_end = cft; 3920 is_add = false; 3921 goto restart; 3922 } 3923 } else { 3924 cgroup_rm_file(cgrp, cft); 3925 } 3926 } 3927 return ret; 3928 } 3929 3930 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) 3931 { 3932 struct cgroup_subsys *ss = cfts[0].ss; 3933 struct cgroup *root = &ss->root->cgrp; 3934 struct cgroup_subsys_state *css; 3935 int ret = 0; 3936 3937 lockdep_assert_held(&cgroup_mutex); 3938 3939 /* add/rm files for all cgroups created before */ 3940 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 3941 struct cgroup *cgrp = css->cgroup; 3942 3943 if (!(css->flags & CSS_VISIBLE)) 3944 continue; 3945 3946 ret = cgroup_addrm_files(css, cgrp, cfts, is_add); 3947 if (ret) 3948 break; 3949 } 3950 3951 if (is_add && !ret) 3952 kernfs_activate(root->kn); 3953 return ret; 3954 } 3955 3956 static void cgroup_exit_cftypes(struct cftype *cfts) 3957 { 3958 struct cftype *cft; 3959 3960 for (cft = cfts; cft->name[0] != '\0'; cft++) { 3961 /* free copy for custom atomic_write_len, see init_cftypes() */ 3962 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) 3963 kfree(cft->kf_ops); 3964 cft->kf_ops = NULL; 3965 cft->ss = NULL; 3966 3967 /* revert flags set by cgroup core while adding @cfts */ 3968 cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL); 3969 } 3970 } 3971 3972 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 3973 { 3974 struct cftype *cft; 3975 3976 for (cft = cfts; cft->name[0] != '\0'; cft++) { 3977 struct kernfs_ops *kf_ops; 3978 3979 WARN_ON(cft->ss || cft->kf_ops); 3980 3981 if (cft->seq_start) 3982 kf_ops = &cgroup_kf_ops; 3983 else 3984 kf_ops = &cgroup_kf_single_ops; 3985 3986 /* 3987 * Ugh... if @cft wants a custom max_write_len, we need to 3988 * make a copy of kf_ops to set its atomic_write_len. 3989 */ 3990 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { 3991 kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); 3992 if (!kf_ops) { 3993 cgroup_exit_cftypes(cfts); 3994 return -ENOMEM; 3995 } 3996 kf_ops->atomic_write_len = cft->max_write_len; 3997 } 3998 3999 cft->kf_ops = kf_ops; 4000 cft->ss = ss; 4001 } 4002 4003 return 0; 4004 } 4005 4006 static int cgroup_rm_cftypes_locked(struct cftype *cfts) 4007 { 4008 lockdep_assert_held(&cgroup_mutex); 4009 4010 if (!cfts || !cfts[0].ss) 4011 return -ENOENT; 4012 4013 list_del(&cfts->node); 4014 cgroup_apply_cftypes(cfts, false); 4015 cgroup_exit_cftypes(cfts); 4016 return 0; 4017 } 4018 4019 /** 4020 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem 4021 * @cfts: zero-length name terminated array of cftypes 4022 * 4023 * Unregister @cfts. Files described by @cfts are removed from all 4024 * existing cgroups and all future cgroups won't have them either. This 4025 * function can be called anytime whether @cfts' subsys is attached or not. 4026 * 4027 * Returns 0 on successful unregistration, -ENOENT if @cfts is not 4028 * registered. 
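 *
 * After removal, cgroup_exit_cftypes() has reset ->ss and ->kf_ops in
 * each entry, so the same @cfts array may later be registered again via
 * cgroup_add_cftypes().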
4029 */ 4030 int cgroup_rm_cftypes(struct cftype *cfts) 4031 { 4032 int ret; 4033 4034 mutex_lock(&cgroup_mutex); 4035 ret = cgroup_rm_cftypes_locked(cfts); 4036 mutex_unlock(&cgroup_mutex); 4037 return ret; 4038 } 4039 4040 /** 4041 * cgroup_add_cftypes - add an array of cftypes to a subsystem 4042 * @ss: target cgroup subsystem 4043 * @cfts: zero-length name terminated array of cftypes 4044 * 4045 * Register @cfts to @ss. Files described by @cfts are created for all 4046 * existing cgroups to which @ss is attached and all future cgroups will 4047 * have them too. This function can be called anytime whether @ss is 4048 * attached or not. 4049 * 4050 * Returns 0 on successful registration, -errno on failure. Note that this 4051 * function currently returns 0 as long as @cfts registration is successful 4052 * even if some file creation attempts on existing cgroups fail. 4053 */ 4054 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4055 { 4056 int ret; 4057 4058 if (!cgroup_ssid_enabled(ss->id)) 4059 return 0; 4060 4061 if (!cfts || cfts[0].name[0] == '\0') 4062 return 0; 4063 4064 ret = cgroup_init_cftypes(ss, cfts); 4065 if (ret) 4066 return ret; 4067 4068 mutex_lock(&cgroup_mutex); 4069 4070 list_add_tail(&cfts->node, &ss->cfts); 4071 ret = cgroup_apply_cftypes(cfts, true); 4072 if (ret) 4073 cgroup_rm_cftypes_locked(cfts); 4074 4075 mutex_unlock(&cgroup_mutex); 4076 return ret; 4077 } 4078 4079 /** 4080 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy 4081 * @ss: target cgroup subsystem 4082 * @cfts: zero-length name terminated array of cftypes 4083 * 4084 * Similar to cgroup_add_cftypes() but the added files are only used for 4085 * the default hierarchy. 4086 */ 4087 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4088 { 4089 struct cftype *cft; 4090 4091 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 4092 cft->flags |= __CFTYPE_ONLY_ON_DFL; 4093 return cgroup_add_cftypes(ss, cfts); 4094 } 4095 4096 /** 4097 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies 4098 * @ss: target cgroup subsystem 4099 * @cfts: zero-length name terminated array of cftypes 4100 * 4101 * Similar to cgroup_add_cftypes() but the added files are only used for 4102 * the legacy hierarchies. 4103 */ 4104 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4105 { 4106 struct cftype *cft; 4107 4108 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 4109 cft->flags |= __CFTYPE_NOT_ON_DFL; 4110 return cgroup_add_cftypes(ss, cfts); 4111 } 4112 4113 /** 4114 * cgroup_file_notify - generate a file modified event for a cgroup_file 4115 * @cfile: target cgroup_file 4116 * 4117 * @cfile must have been obtained by setting cftype->file_offset. 
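 *
 * Typical use (sketch): a cftype sets ".file_offset = offsetof(struct
 * cgroup, events_file)" the way "cgroup.events" does below, and the
 * controller then calls cgroup_file_notify(&cgrp->events_file) to wake
 * pollers. Notifications are rate-limited to one per
 * CGROUP_FILE_NOTIFY_MIN_INTV via ->notify_timer.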
4118 */ 4119 void cgroup_file_notify(struct cgroup_file *cfile) 4120 { 4121 unsigned long flags; 4122 4123 spin_lock_irqsave(&cgroup_file_kn_lock, flags); 4124 if (cfile->kn) { 4125 unsigned long last = cfile->notified_at; 4126 unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV; 4127 4128 if (time_in_range(jiffies, last, next)) { 4129 timer_reduce(&cfile->notify_timer, next); 4130 } else { 4131 kernfs_notify(cfile->kn); 4132 cfile->notified_at = jiffies; 4133 } 4134 } 4135 spin_unlock_irqrestore(&cgroup_file_kn_lock, flags); 4136 } 4137 4138 /** 4139 * css_next_child - find the next child of a given css 4140 * @pos: the current position (%NULL to initiate traversal) 4141 * @parent: css whose children to walk 4142 * 4143 * This function returns the next child of @parent and should be called 4144 * under either cgroup_mutex or RCU read lock. The only requirement is 4145 * that @parent and @pos are accessible. The next sibling is guaranteed to 4146 * be returned regardless of their states. 4147 * 4148 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4149 * css which finished ->css_online() is guaranteed to be visible in the 4150 * future iterations and will stay visible until the last reference is put. 4151 * A css which hasn't finished ->css_online() or already finished 4152 * ->css_offline() may show up during traversal. It's each subsystem's 4153 * responsibility to synchronize against on/offlining. 4154 */ 4155 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, 4156 struct cgroup_subsys_state *parent) 4157 { 4158 struct cgroup_subsys_state *next; 4159 4160 cgroup_assert_mutex_or_rcu_locked(); 4161 4162 /* 4163 * @pos could already have been unlinked from the sibling list. 4164 * Once a cgroup is removed, its ->sibling.next is no longer 4165 * updated when its next sibling changes. CSS_RELEASED is set when 4166 * @pos is taken off list, at which time its next pointer is valid, 4167 * and, as releases are serialized, the one pointed to by the next 4168 * pointer is guaranteed to not have started release yet. This 4169 * implies that if we observe !CSS_RELEASED on @pos in this RCU 4170 * critical section, the one pointed to by its next pointer is 4171 * guaranteed to not have finished its RCU grace period even if we 4172 * have dropped rcu_read_lock() in between iterations. 4173 * 4174 * If @pos has CSS_RELEASED set, its next pointer can't be 4175 * dereferenced; however, as each css is given a monotonically 4176 * increasing unique serial number and always appended to the 4177 * sibling list, the next one can be found by walking the parent's 4178 * children until the first css with a higher serial number than 4179 * @pos's. While this path can be slower, it happens iff iteration 4180 * races against release and the race window is very small. 4181 */ 4182 if (!pos) { 4183 next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling); 4184 } else if (likely(!(pos->flags & CSS_RELEASED))) { 4185 next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling); 4186 } else { 4187 list_for_each_entry_rcu(next, &parent->children, sibling) 4188 if (next->serial_nr > pos->serial_nr) 4189 break; 4190 } 4191 4192 /* 4193 * @next, if not pointing to the head, can be dereferenced and is 4194 * the next sibling.
4195 */ 4196 if (&next->sibling != &parent->children) 4197 return next; 4198 return NULL; 4199 } 4200 4201 /** 4202 * css_next_descendant_pre - find the next descendant for pre-order walk 4203 * @pos: the current position (%NULL to initiate traversal) 4204 * @root: css whose descendants to walk 4205 * 4206 * To be used by css_for_each_descendant_pre(). Find the next descendant 4207 * to visit for pre-order traversal of @root's descendants. @root is 4208 * included in the iteration and the first node to be visited. 4209 * 4210 * While this function requires cgroup_mutex or RCU read locking, it 4211 * doesn't require the whole traversal to be contained in a single critical 4212 * section. This function will return the correct next descendant as long 4213 * as both @pos and @root are accessible and @pos is a descendant of @root. 4214 * 4215 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4216 * css which finished ->css_online() is guaranteed to be visible in the 4217 * future iterations and will stay visible until the last reference is put. 4218 * A css which hasn't finished ->css_online() or already finished 4219 * ->css_offline() may show up during traversal. It's each subsystem's 4220 * responsibility to synchronize against on/offlining. 4221 */ 4222 struct cgroup_subsys_state * 4223 css_next_descendant_pre(struct cgroup_subsys_state *pos, 4224 struct cgroup_subsys_state *root) 4225 { 4226 struct cgroup_subsys_state *next; 4227 4228 cgroup_assert_mutex_or_rcu_locked(); 4229 4230 /* if first iteration, visit @root */ 4231 if (!pos) 4232 return root; 4233 4234 /* visit the first child if exists */ 4235 next = css_next_child(NULL, pos); 4236 if (next) 4237 return next; 4238 4239 /* no child, visit my or the closest ancestor's next sibling */ 4240 while (pos != root) { 4241 next = css_next_child(pos, pos->parent); 4242 if (next) 4243 return next; 4244 pos = pos->parent; 4245 } 4246 4247 return NULL; 4248 } 4249 4250 /** 4251 * css_rightmost_descendant - return the rightmost descendant of a css 4252 * @pos: css of interest 4253 * 4254 * Return the rightmost descendant of @pos. If there's no descendant, @pos 4255 * is returned. This can be used during pre-order traversal to skip 4256 * subtree of @pos. 4257 * 4258 * While this function requires cgroup_mutex or RCU read locking, it 4259 * doesn't require the whole traversal to be contained in a single critical 4260 * section. This function will return the correct rightmost descendant as 4261 * long as @pos is accessible. 4262 */ 4263 struct cgroup_subsys_state * 4264 css_rightmost_descendant(struct cgroup_subsys_state *pos) 4265 { 4266 struct cgroup_subsys_state *last, *tmp; 4267 4268 cgroup_assert_mutex_or_rcu_locked(); 4269 4270 do { 4271 last = pos; 4272 /* ->prev isn't RCU safe, walk ->next till the end */ 4273 pos = NULL; 4274 css_for_each_child(tmp, last) 4275 pos = tmp; 4276 } while (pos); 4277 4278 return last; 4279 } 4280 4281 static struct cgroup_subsys_state * 4282 css_leftmost_descendant(struct cgroup_subsys_state *pos) 4283 { 4284 struct cgroup_subsys_state *last; 4285 4286 do { 4287 last = pos; 4288 pos = css_next_child(NULL, pos); 4289 } while (pos); 4290 4291 return last; 4292 } 4293 4294 /** 4295 * css_next_descendant_post - find the next descendant for post-order walk 4296 * @pos: the current position (%NULL to initiate traversal) 4297 * @root: css whose descendants to walk 4298 * 4299 * To be used by css_for_each_descendant_post(). 
Find the next descendant 4300 * to visit for post-order traversal of @root's descendants. @root is 4301 * included in the iteration and the last node to be visited. 4302 * 4303 * While this function requires cgroup_mutex or RCU read locking, it 4304 * doesn't require the whole traversal to be contained in a single critical 4305 * section. This function will return the correct next descendant as long 4306 * as both @pos and @root are accessible and @pos is a descendant of 4307 * @root. 4308 * 4309 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4310 * css which finished ->css_online() is guaranteed to be visible in the 4311 * future iterations and will stay visible until the last reference is put. 4312 * A css which hasn't finished ->css_online() or already finished 4313 * ->css_offline() may show up during traversal. It's each subsystem's 4314 * responsibility to synchronize against on/offlining. 4315 */ 4316 struct cgroup_subsys_state * 4317 css_next_descendant_post(struct cgroup_subsys_state *pos, 4318 struct cgroup_subsys_state *root) 4319 { 4320 struct cgroup_subsys_state *next; 4321 4322 cgroup_assert_mutex_or_rcu_locked(); 4323 4324 /* if first iteration, visit leftmost descendant which may be @root */ 4325 if (!pos) 4326 return css_leftmost_descendant(root); 4327 4328 /* if we visited @root, we're done */ 4329 if (pos == root) 4330 return NULL; 4331 4332 /* if there's an unvisited sibling, visit its leftmost descendant */ 4333 next = css_next_child(pos, pos->parent); 4334 if (next) 4335 return css_leftmost_descendant(next); 4336 4337 /* no sibling left, visit parent */ 4338 return pos->parent; 4339 } 4340 4341 /** 4342 * css_has_online_children - does a css have online children 4343 * @css: the target css 4344 * 4345 * Returns %true if @css has any online children; otherwise, %false. This 4346 * function can be called from any context but the caller is responsible 4347 * for synchronizing against on/offlining as necessary.
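 *
 * The body below doubles as a usage sketch for css_for_each_child():
 * take rcu_read_lock(), walk the children, and test each child's flags.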
4348 */ 4349 bool css_has_online_children(struct cgroup_subsys_state *css) 4350 { 4351 struct cgroup_subsys_state *child; 4352 bool ret = false; 4353 4354 rcu_read_lock(); 4355 css_for_each_child(child, css) { 4356 if (child->flags & CSS_ONLINE) { 4357 ret = true; 4358 break; 4359 } 4360 } 4361 rcu_read_unlock(); 4362 return ret; 4363 } 4364 4365 static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it) 4366 { 4367 struct list_head *l; 4368 struct cgrp_cset_link *link; 4369 struct css_set *cset; 4370 4371 lockdep_assert_held(&css_set_lock); 4372 4373 /* find the next threaded cset */ 4374 if (it->tcset_pos) { 4375 l = it->tcset_pos->next; 4376 4377 if (l != it->tcset_head) { 4378 it->tcset_pos = l; 4379 return container_of(l, struct css_set, 4380 threaded_csets_node); 4381 } 4382 4383 it->tcset_pos = NULL; 4384 } 4385 4386 /* find the next cset */ 4387 l = it->cset_pos; 4388 l = l->next; 4389 if (l == it->cset_head) { 4390 it->cset_pos = NULL; 4391 return NULL; 4392 } 4393 4394 if (it->ss) { 4395 cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); 4396 } else { 4397 link = list_entry(l, struct cgrp_cset_link, cset_link); 4398 cset = link->cset; 4399 } 4400 4401 it->cset_pos = l; 4402 4403 /* initialize threaded css_set walking */ 4404 if (it->flags & CSS_TASK_ITER_THREADED) { 4405 if (it->cur_dcset) 4406 put_css_set_locked(it->cur_dcset); 4407 it->cur_dcset = cset; 4408 get_css_set(cset); 4409 4410 it->tcset_head = &cset->threaded_csets; 4411 it->tcset_pos = &cset->threaded_csets; 4412 } 4413 4414 return cset; 4415 } 4416 4417 /** 4418 * css_task_iter_advance_css_set - advance a task iterator to the next css_set 4419 * @it: the iterator to advance 4420 * 4421 * Advance @it to the next css_set to walk. 4422 */ 4423 static void css_task_iter_advance_css_set(struct css_task_iter *it) 4424 { 4425 struct css_set *cset; 4426 4427 lockdep_assert_held(&css_set_lock); 4428 4429 /* Advance to the next non-empty css_set */ 4430 do { 4431 cset = css_task_iter_next_css_set(it); 4432 if (!cset) { 4433 it->task_pos = NULL; 4434 return; 4435 } 4436 } while (!css_set_populated(cset)); 4437 4438 if (!list_empty(&cset->tasks)) 4439 it->task_pos = cset->tasks.next; 4440 else 4441 it->task_pos = cset->mg_tasks.next; 4442 4443 it->tasks_head = &cset->tasks; 4444 it->mg_tasks_head = &cset->mg_tasks; 4445 4446 /* 4447 * We don't keep css_sets locked across iteration steps and thus 4448 * need to take steps to ensure that iteration can be resumed after 4449 * the lock is re-acquired. Iteration is performed at two levels - 4450 * css_sets and tasks in them. 4451 * 4452 * Once created, a css_set never leaves its cgroup lists, so a 4453 * pinned css_set is guaranteed to stay put and we can resume 4454 * iteration afterwards. 4455 * 4456 * Tasks may leave @cset across iteration steps. This is resolved 4457 * by registering each iterator with the css_set currently being 4458 * walked and making css_set_move_task() advance iterators whose 4459 * next task is leaving. 4460 */ 4461 if (it->cur_cset) { 4462 list_del(&it->iters_node); 4463 put_css_set_locked(it->cur_cset); 4464 } 4465 get_css_set(cset); 4466 it->cur_cset = cset; 4467 list_add(&it->iters_node, &cset->task_iters); 4468 } 4469 4470 static void css_task_iter_advance(struct css_task_iter *it) 4471 { 4472 struct list_head *next; 4473 4474 lockdep_assert_held(&css_set_lock); 4475 repeat: 4476 if (it->task_pos) { 4477 /* 4478 * Advance iterator to find next entry. cset->tasks is 4479 * consumed first and then ->mg_tasks.
After ->mg_tasks, 4480 * we move onto the next cset. 4481 */ 4482 next = it->task_pos->next; 4483 4484 if (next == it->tasks_head) 4485 next = it->mg_tasks_head->next; 4486 4487 if (next == it->mg_tasks_head) 4488 css_task_iter_advance_css_set(it); 4489 else 4490 it->task_pos = next; 4491 } else { 4492 /* called from start, proceed to the first cset */ 4493 css_task_iter_advance_css_set(it); 4494 } 4495 4496 /* if PROCS, skip over tasks which aren't group leaders */ 4497 if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && 4498 !thread_group_leader(list_entry(it->task_pos, struct task_struct, 4499 cg_list))) 4500 goto repeat; 4501 } 4502 4503 /** 4504 * css_task_iter_start - initiate task iteration 4505 * @css: the css to walk tasks of 4506 * @flags: CSS_TASK_ITER_* flags 4507 * @it: the task iterator to use 4508 * 4509 * Initiate iteration through the tasks of @css. The caller can call 4510 * css_task_iter_next() to walk through the tasks until the function 4511 * returns NULL. On completion of iteration, css_task_iter_end() must be 4512 * called. 4513 */ 4514 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, 4515 struct css_task_iter *it) 4516 { 4517 /* no one should try to iterate before mounting cgroups */ 4518 WARN_ON_ONCE(!use_task_css_set_links); 4519 4520 memset(it, 0, sizeof(*it)); 4521 4522 spin_lock_irq(&css_set_lock); 4523 4524 it->ss = css->ss; 4525 it->flags = flags; 4526 4527 if (it->ss) 4528 it->cset_pos = &css->cgroup->e_csets[css->ss->id]; 4529 else 4530 it->cset_pos = &css->cgroup->cset_links; 4531 4532 it->cset_head = it->cset_pos; 4533 4534 css_task_iter_advance(it); 4535 4536 spin_unlock_irq(&css_set_lock); 4537 } 4538 4539 /** 4540 * css_task_iter_next - return the next task for the iterator 4541 * @it: the task iterator being iterated 4542 * 4543 * The "next" function for task iteration. @it should have been 4544 * initialized via css_task_iter_start(). Returns NULL when the iteration 4545 * reaches the end. 4546 */ 4547 struct task_struct *css_task_iter_next(struct css_task_iter *it) 4548 { 4549 if (it->cur_task) { 4550 put_task_struct(it->cur_task); 4551 it->cur_task = NULL; 4552 } 4553 4554 spin_lock_irq(&css_set_lock); 4555 4556 if (it->task_pos) { 4557 it->cur_task = list_entry(it->task_pos, struct task_struct, 4558 cg_list); 4559 get_task_struct(it->cur_task); 4560 css_task_iter_advance(it); 4561 } 4562 4563 spin_unlock_irq(&css_set_lock); 4564 4565 return it->cur_task; 4566 } 4567 4568 /** 4569 * css_task_iter_end - finish task iteration 4570 * @it: the task iterator to finish 4571 * 4572 * Finish task iteration started by css_task_iter_start(). 
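 *
 * A minimal usage sketch of the iterator (process_one() is a
 * hypothetical callback, not part of this file):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		process_one(task);
 *	css_task_iter_end(&it);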
4573 */ 4574 void css_task_iter_end(struct css_task_iter *it) 4575 { 4576 if (it->cur_cset) { 4577 spin_lock_irq(&css_set_lock); 4578 list_del(&it->iters_node); 4579 put_css_set_locked(it->cur_cset); 4580 spin_unlock_irq(&css_set_lock); 4581 } 4582 4583 if (it->cur_dcset) 4584 put_css_set(it->cur_dcset); 4585 4586 if (it->cur_task) 4587 put_task_struct(it->cur_task); 4588 } 4589 4590 static void cgroup_procs_release(struct kernfs_open_file *of) 4591 { 4592 if (of->priv) { 4593 css_task_iter_end(of->priv); 4594 kfree(of->priv); 4595 } 4596 } 4597 4598 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) 4599 { 4600 struct kernfs_open_file *of = s->private; 4601 struct css_task_iter *it = of->priv; 4602 4603 return css_task_iter_next(it); 4604 } 4605 4606 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, 4607 unsigned int iter_flags) 4608 { 4609 struct kernfs_open_file *of = s->private; 4610 struct cgroup *cgrp = seq_css(s)->cgroup; 4611 struct css_task_iter *it = of->priv; 4612 4613 /* 4614 * When a seq_file is seeked, it's always traversed sequentially 4615 * from position 0, so we can simply keep iterating on !0 *pos. 4616 */ 4617 if (!it) { 4618 if (WARN_ON_ONCE((*pos)++)) 4619 return ERR_PTR(-EINVAL); 4620 4621 it = kzalloc(sizeof(*it), GFP_KERNEL); 4622 if (!it) 4623 return ERR_PTR(-ENOMEM); 4624 of->priv = it; 4625 css_task_iter_start(&cgrp->self, iter_flags, it); 4626 } else if (!(*pos)++) { 4627 css_task_iter_end(it); 4628 css_task_iter_start(&cgrp->self, iter_flags, it); 4629 } 4630 4631 return cgroup_procs_next(s, NULL, NULL); 4632 } 4633 4634 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos) 4635 { 4636 struct cgroup *cgrp = seq_css(s)->cgroup; 4637 4638 /* 4639 * All processes of a threaded subtree belong to the domain cgroup 4640 * of the subtree. Only threads can be distributed across the 4641 * subtree. Reject reads on cgroup.procs in the subtree proper. 4642 * They're always empty anyway. 4643 */ 4644 if (cgroup_is_threaded(cgrp)) 4645 return ERR_PTR(-EOPNOTSUPP); 4646 4647 return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS | 4648 CSS_TASK_ITER_THREADED); 4649 } 4650 4651 static int cgroup_procs_show(struct seq_file *s, void *v) 4652 { 4653 seq_printf(s, "%d\n", task_pid_vnr(v)); 4654 return 0; 4655 } 4656 4657 static int cgroup_procs_write_permission(struct cgroup *src_cgrp, 4658 struct cgroup *dst_cgrp, 4659 struct super_block *sb) 4660 { 4661 struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; 4662 struct cgroup *com_cgrp = src_cgrp; 4663 struct inode *inode; 4664 int ret; 4665 4666 lockdep_assert_held(&cgroup_mutex); 4667 4668 /* find the common ancestor */ 4669 while (!cgroup_is_descendant(dst_cgrp, com_cgrp)) 4670 com_cgrp = cgroup_parent(com_cgrp); 4671 4672 /* %current should be authorized to migrate to the common ancestor */ 4673 inode = kernfs_get_inode(sb, com_cgrp->procs_file.kn); 4674 if (!inode) 4675 return -ENOMEM; 4676 4677 ret = inode_permission(inode, MAY_WRITE); 4678 iput(inode); 4679 if (ret) 4680 return ret; 4681 4682 /* 4683 * If namespaces are delegation boundaries, %current must be able 4684 * to see both source and destination cgroups from its namespace. 
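 * For example, with a cgroup namespace rooted at /a, a migration
 * between /a/b and /a/c can pass this check, while one involving a
 * cgroup outside /a fails with -ENOENT below.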
4685 */ 4686 if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) && 4687 (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) || 4688 !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp))) 4689 return -ENOENT; 4690 4691 return 0; 4692 } 4693 4694 static ssize_t cgroup_procs_write(struct kernfs_open_file *of, 4695 char *buf, size_t nbytes, loff_t off) 4696 { 4697 struct cgroup *src_cgrp, *dst_cgrp; 4698 struct task_struct *task; 4699 ssize_t ret; 4700 4701 dst_cgrp = cgroup_kn_lock_live(of->kn, false); 4702 if (!dst_cgrp) 4703 return -ENODEV; 4704 4705 task = cgroup_procs_write_start(buf, true); 4706 ret = PTR_ERR_OR_ZERO(task); 4707 if (ret) 4708 goto out_unlock; 4709 4710 /* find the source cgroup */ 4711 spin_lock_irq(&css_set_lock); 4712 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 4713 spin_unlock_irq(&css_set_lock); 4714 4715 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, 4716 of->file->f_path.dentry->d_sb); 4717 if (ret) 4718 goto out_finish; 4719 4720 ret = cgroup_attach_task(dst_cgrp, task, true); 4721 4722 out_finish: 4723 cgroup_procs_write_finish(task); 4724 out_unlock: 4725 cgroup_kn_unlock(of->kn); 4726 4727 return ret ?: nbytes; 4728 } 4729 4730 static void *cgroup_threads_start(struct seq_file *s, loff_t *pos) 4731 { 4732 return __cgroup_procs_start(s, pos, 0); 4733 } 4734 4735 static ssize_t cgroup_threads_write(struct kernfs_open_file *of, 4736 char *buf, size_t nbytes, loff_t off) 4737 { 4738 struct cgroup *src_cgrp, *dst_cgrp; 4739 struct task_struct *task; 4740 ssize_t ret; 4741 4742 buf = strstrip(buf); 4743 4744 dst_cgrp = cgroup_kn_lock_live(of->kn, false); 4745 if (!dst_cgrp) 4746 return -ENODEV; 4747 4748 task = cgroup_procs_write_start(buf, false); 4749 ret = PTR_ERR_OR_ZERO(task); 4750 if (ret) 4751 goto out_unlock; 4752 4753 /* find the source cgroup */ 4754 spin_lock_irq(&css_set_lock); 4755 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 4756 spin_unlock_irq(&css_set_lock); 4757 4758 /* thread migrations follow the cgroup.procs delegation rule */ 4759 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, 4760 of->file->f_path.dentry->d_sb); 4761 if (ret) 4762 goto out_finish; 4763 4764 /* and must be contained in the same domain */ 4765 ret = -EOPNOTSUPP; 4766 if (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp) 4767 goto out_finish; 4768 4769 ret = cgroup_attach_task(dst_cgrp, task, false); 4770 4771 out_finish: 4772 cgroup_procs_write_finish(task); 4773 out_unlock: 4774 cgroup_kn_unlock(of->kn); 4775 4776 return ret ?: nbytes; 4777 } 4778 4779 /* cgroup core interface files for the default hierarchy */ 4780 static struct cftype cgroup_base_files[] = { 4781 { 4782 .name = "cgroup.type", 4783 .flags = CFTYPE_NOT_ON_ROOT, 4784 .seq_show = cgroup_type_show, 4785 .write = cgroup_type_write, 4786 }, 4787 { 4788 .name = "cgroup.procs", 4789 .flags = CFTYPE_NS_DELEGATABLE, 4790 .file_offset = offsetof(struct cgroup, procs_file), 4791 .release = cgroup_procs_release, 4792 .seq_start = cgroup_procs_start, 4793 .seq_next = cgroup_procs_next, 4794 .seq_show = cgroup_procs_show, 4795 .write = cgroup_procs_write, 4796 }, 4797 { 4798 .name = "cgroup.threads", 4799 .flags = CFTYPE_NS_DELEGATABLE, 4800 .release = cgroup_procs_release, 4801 .seq_start = cgroup_threads_start, 4802 .seq_next = cgroup_procs_next, 4803 .seq_show = cgroup_procs_show, 4804 .write = cgroup_threads_write, 4805 }, 4806 { 4807 .name = "cgroup.controllers", 4808 .seq_show = cgroup_controllers_show, 4809 }, 4810 { 4811 .name = "cgroup.subtree_control", 4812 .flags = 
CFTYPE_NS_DELEGATABLE, 4813 .seq_show = cgroup_subtree_control_show, 4814 .write = cgroup_subtree_control_write, 4815 }, 4816 { 4817 .name = "cgroup.events", 4818 .flags = CFTYPE_NOT_ON_ROOT, 4819 .file_offset = offsetof(struct cgroup, events_file), 4820 .seq_show = cgroup_events_show, 4821 }, 4822 { 4823 .name = "cgroup.max.descendants", 4824 .seq_show = cgroup_max_descendants_show, 4825 .write = cgroup_max_descendants_write, 4826 }, 4827 { 4828 .name = "cgroup.max.depth", 4829 .seq_show = cgroup_max_depth_show, 4830 .write = cgroup_max_depth_write, 4831 }, 4832 { 4833 .name = "cgroup.stat", 4834 .seq_show = cgroup_stat_show, 4835 }, 4836 { 4837 .name = "cgroup.freeze", 4838 .flags = CFTYPE_NOT_ON_ROOT, 4839 .seq_show = cgroup_freeze_show, 4840 .write = cgroup_freeze_write, 4841 }, 4842 { 4843 .name = "cpu.stat", 4844 .flags = CFTYPE_NOT_ON_ROOT, 4845 .seq_show = cpu_stat_show, 4846 }, 4847 #ifdef CONFIG_PSI 4848 { 4849 .name = "io.pressure", 4850 .seq_show = cgroup_io_pressure_show, 4851 .write = cgroup_io_pressure_write, 4852 .poll = cgroup_pressure_poll, 4853 .release = cgroup_pressure_release, 4854 }, 4855 { 4856 .name = "memory.pressure", 4857 .seq_show = cgroup_memory_pressure_show, 4858 .write = cgroup_memory_pressure_write, 4859 .poll = cgroup_pressure_poll, 4860 .release = cgroup_pressure_release, 4861 }, 4862 { 4863 .name = "cpu.pressure", 4864 .seq_show = cgroup_cpu_pressure_show, 4865 .write = cgroup_cpu_pressure_write, 4866 .poll = cgroup_pressure_poll, 4867 .release = cgroup_pressure_release, 4868 }, 4869 #endif /* CONFIG_PSI */ 4870 { } /* terminate */ 4871 }; 4872 4873 /* 4874 * css destruction is a four-stage process. 4875 * 4876 * 1. Destruction starts. Killing of the percpu_ref is initiated. 4877 * Implemented in kill_css(). 4878 * 4879 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs 4880 * and thus css_tryget_online() is guaranteed to fail, the css can be 4881 * offlined by invoking offline_css(). After offlining, the base ref is 4882 * put. Implemented in css_killed_work_fn(). 4883 * 4884 * 3. When the percpu_ref reaches zero, the only possible remaining 4885 * accessors are inside RCU read sections. css_release() schedules the 4886 * RCU callback. 4887 * 4888 * 4. After the grace period, the css can be freed. Implemented in 4889 * css_free_rwork_fn(). 4890 * 4891 * It is actually hairier because both steps 2 and 4 require process context 4892 * and thus involve punting to css->destroy_work, adding two additional 4893 * steps to the already complex sequence. 4894 */ 4895 static void css_free_rwork_fn(struct work_struct *work) 4896 { 4897 struct cgroup_subsys_state *css = container_of(to_rcu_work(work), 4898 struct cgroup_subsys_state, destroy_rwork); 4899 struct cgroup_subsys *ss = css->ss; 4900 struct cgroup *cgrp = css->cgroup; 4901 4902 percpu_ref_exit(&css->refcnt); 4903 4904 if (ss) { 4905 /* css free path */ 4906 struct cgroup_subsys_state *parent = css->parent; 4907 int id = css->id; 4908 4909 ss->css_free(css); 4910 cgroup_idr_remove(&ss->css_idr, id); 4911 cgroup_put(cgrp); 4912 4913 if (parent) 4914 css_put(parent); 4915 } else { 4916 /* cgroup free path */ 4917 atomic_dec(&cgrp->root->nr_cgrps); 4918 cgroup1_pidlist_destroy_all(cgrp); 4919 cancel_work_sync(&cgrp->release_agent_work); 4920 4921 if (cgroup_parent(cgrp)) { 4922 /* 4923 * We get a ref to the parent, and put the ref when 4924 * this cgroup is being freed, so it's guaranteed 4925 * that the parent won't be destroyed before its 4926 * children.
4927 */ 4928 cgroup_put(cgroup_parent(cgrp)); 4929 kernfs_put(cgrp->kn); 4930 psi_cgroup_free(cgrp); 4931 if (cgroup_on_dfl(cgrp)) 4932 cgroup_rstat_exit(cgrp); 4933 kfree(cgrp); 4934 } else { 4935 /* 4936 * This is root cgroup's refcnt reaching zero, 4937 * which indicates that the root should be 4938 * released. 4939 */ 4940 cgroup_destroy_root(cgrp->root); 4941 } 4942 } 4943 } 4944 4945 static void css_release_work_fn(struct work_struct *work) 4946 { 4947 struct cgroup_subsys_state *css = 4948 container_of(work, struct cgroup_subsys_state, destroy_work); 4949 struct cgroup_subsys *ss = css->ss; 4950 struct cgroup *cgrp = css->cgroup; 4951 4952 mutex_lock(&cgroup_mutex); 4953 4954 css->flags |= CSS_RELEASED; 4955 list_del_rcu(&css->sibling); 4956 4957 if (ss) { 4958 /* css release path */ 4959 if (!list_empty(&css->rstat_css_node)) { 4960 cgroup_rstat_flush(cgrp); 4961 list_del_rcu(&css->rstat_css_node); 4962 } 4963 4964 cgroup_idr_replace(&ss->css_idr, NULL, css->id); 4965 if (ss->css_released) 4966 ss->css_released(css); 4967 } else { 4968 struct cgroup *tcgrp; 4969 4970 /* cgroup release path */ 4971 TRACE_CGROUP_PATH(release, cgrp); 4972 4973 if (cgroup_on_dfl(cgrp)) 4974 cgroup_rstat_flush(cgrp); 4975 4976 spin_lock_irq(&css_set_lock); 4977 for (tcgrp = cgroup_parent(cgrp); tcgrp; 4978 tcgrp = cgroup_parent(tcgrp)) 4979 tcgrp->nr_dying_descendants--; 4980 spin_unlock_irq(&css_set_lock); 4981 4982 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 4983 cgrp->id = -1; 4984 4985 /* 4986 * There are two control paths which try to determine 4987 * cgroup from dentry without going through kernfs - 4988 * cgroupstats_build() and css_tryget_online_from_dir(). 4989 * Those are supported by RCU protecting clearing of 4990 * cgrp->kn->priv backpointer. 
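 * In other words, those lookup paths rcu_dereference() the ->priv
 * backpointer and must tolerate seeing NULL once the cgroup has been
 * released.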
4991 */ 4992 if (cgrp->kn) 4993 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, 4994 NULL); 4995 4996 cgroup_bpf_put(cgrp); 4997 } 4998 4999 mutex_unlock(&cgroup_mutex); 5000 5001 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 5002 queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 5003 } 5004 5005 static void css_release(struct percpu_ref *ref) 5006 { 5007 struct cgroup_subsys_state *css = 5008 container_of(ref, struct cgroup_subsys_state, refcnt); 5009 5010 INIT_WORK(&css->destroy_work, css_release_work_fn); 5011 queue_work(cgroup_destroy_wq, &css->destroy_work); 5012 } 5013 5014 static void init_and_link_css(struct cgroup_subsys_state *css, 5015 struct cgroup_subsys *ss, struct cgroup *cgrp) 5016 { 5017 lockdep_assert_held(&cgroup_mutex); 5018 5019 cgroup_get_live(cgrp); 5020 5021 memset(css, 0, sizeof(*css)); 5022 css->cgroup = cgrp; 5023 css->ss = ss; 5024 css->id = -1; 5025 INIT_LIST_HEAD(&css->sibling); 5026 INIT_LIST_HEAD(&css->children); 5027 INIT_LIST_HEAD(&css->rstat_css_node); 5028 css->serial_nr = css_serial_nr_next++; 5029 atomic_set(&css->online_cnt, 0); 5030 5031 if (cgroup_parent(cgrp)) { 5032 css->parent = cgroup_css(cgroup_parent(cgrp), ss); 5033 css_get(css->parent); 5034 } 5035 5036 if (cgroup_on_dfl(cgrp) && ss->css_rstat_flush) 5037 list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list); 5038 5039 BUG_ON(cgroup_css(cgrp, ss)); 5040 } 5041 5042 /* invoke ->css_online() on a new CSS and mark it online if successful */ 5043 static int online_css(struct cgroup_subsys_state *css) 5044 { 5045 struct cgroup_subsys *ss = css->ss; 5046 int ret = 0; 5047 5048 lockdep_assert_held(&cgroup_mutex); 5049 5050 if (ss->css_online) 5051 ret = ss->css_online(css); 5052 if (!ret) { 5053 css->flags |= CSS_ONLINE; 5054 rcu_assign_pointer(css->cgroup->subsys[ss->id], css); 5055 5056 atomic_inc(&css->online_cnt); 5057 if (css->parent) 5058 atomic_inc(&css->parent->online_cnt); 5059 } 5060 return ret; 5061 } 5062 5063 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */ 5064 static void offline_css(struct cgroup_subsys_state *css) 5065 { 5066 struct cgroup_subsys *ss = css->ss; 5067 5068 lockdep_assert_held(&cgroup_mutex); 5069 5070 if (!(css->flags & CSS_ONLINE)) 5071 return; 5072 5073 if (ss->css_offline) 5074 ss->css_offline(css); 5075 5076 css->flags &= ~CSS_ONLINE; 5077 RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL); 5078 5079 wake_up_all(&css->cgroup->offline_waitq); 5080 } 5081 5082 /** 5083 * css_create - create a cgroup_subsys_state 5084 * @cgrp: the cgroup new css will be associated with 5085 * @ss: the subsys of new css 5086 * 5087 * Create a new css associated with @cgrp - @ss pair. On success, the new 5088 * css is online and installed in @cgrp. This function doesn't create the 5089 * interface files. Returns the new css on success, or an ERR_PTR() encoding -errno on failure.
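 * Callers therefore check the result with IS_ERR()/PTR_ERR() rather
 * than testing for NULL.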
5090 */ 5091 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp, 5092 struct cgroup_subsys *ss) 5093 { 5094 struct cgroup *parent = cgroup_parent(cgrp); 5095 struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss); 5096 struct cgroup_subsys_state *css; 5097 int err; 5098 5099 lockdep_assert_held(&cgroup_mutex); 5100 5101 css = ss->css_alloc(parent_css); 5102 if (!css) 5103 css = ERR_PTR(-ENOMEM); 5104 if (IS_ERR(css)) 5105 return css; 5106 5107 init_and_link_css(css, ss, cgrp); 5108 5109 err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL); 5110 if (err) 5111 goto err_free_css; 5112 5113 err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL); 5114 if (err < 0) 5115 goto err_free_css; 5116 css->id = err; 5117 5118 /* @css is ready to be brought online now, make it visible */ 5119 list_add_tail_rcu(&css->sibling, &parent_css->children); 5120 cgroup_idr_replace(&ss->css_idr, css, css->id); 5121 5122 err = online_css(css); 5123 if (err) 5124 goto err_list_del; 5125 5126 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && 5127 cgroup_parent(parent)) { 5128 pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n", 5129 current->comm, current->pid, ss->name); 5130 if (!strcmp(ss->name, "memory")) 5131 pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n"); 5132 ss->warned_broken_hierarchy = true; 5133 } 5134 5135 return css; 5136 5137 err_list_del: 5138 list_del_rcu(&css->sibling); 5139 err_free_css: 5140 list_del_rcu(&css->rstat_css_node); 5141 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 5142 queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 5143 return ERR_PTR(err); 5144 } 5145 5146 /* 5147 * The returned cgroup is fully initialized including its control mask, but 5148 * it isn't associated with its kernfs_node and doesn't have the control 5149 * mask applied. 5150 */ 5151 static struct cgroup *cgroup_create(struct cgroup *parent) 5152 { 5153 struct cgroup_root *root = parent->root; 5154 struct cgroup *cgrp, *tcgrp; 5155 int level = parent->level + 1; 5156 int ret; 5157 5158 /* allocate the cgroup and its ID, 0 is reserved for the root */ 5159 cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)), 5160 GFP_KERNEL); 5161 if (!cgrp) 5162 return ERR_PTR(-ENOMEM); 5163 5164 ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL); 5165 if (ret) 5166 goto out_free_cgrp; 5167 5168 if (cgroup_on_dfl(parent)) { 5169 ret = cgroup_rstat_init(cgrp); 5170 if (ret) 5171 goto out_cancel_ref; 5172 } 5173 5174 /* 5175 * Temporarily set the pointer to NULL, so idr_find() won't return 5176 * a half-baked cgroup. 5177 */ 5178 cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL); 5179 if (cgrp->id < 0) { 5180 ret = -ENOMEM; 5181 goto out_stat_exit; 5182 } 5183 5184 init_cgroup_housekeeping(cgrp); 5185 5186 cgrp->self.parent = &parent->self; 5187 cgrp->root = root; 5188 cgrp->level = level; 5189 5190 ret = psi_cgroup_alloc(cgrp); 5191 if (ret) 5192 goto out_idr_free; 5193 5194 ret = cgroup_bpf_inherit(cgrp); 5195 if (ret) 5196 goto out_psi_free; 5197 5198 /* 5199 * New cgroup inherits effective freeze counter, and 5200 * if the parent has to be frozen, the child has too. 
5201 */ 5202 cgrp->freezer.e_freeze = parent->freezer.e_freeze; 5203 if (cgrp->freezer.e_freeze) 5204 set_bit(CGRP_FROZEN, &cgrp->flags); 5205 5206 spin_lock_irq(&css_set_lock); 5207 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { 5208 cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; 5209 5210 if (tcgrp != cgrp) { 5211 tcgrp->nr_descendants++; 5212 5213 /* 5214 * If the new cgroup is frozen, all ancestor cgroups 5215 * get a new frozen descendant, but their state can't 5216 * change because of this. 5217 */ 5218 if (cgrp->freezer.e_freeze) 5219 tcgrp->freezer.nr_frozen_descendants++; 5220 } 5221 } 5222 spin_unlock_irq(&css_set_lock); 5223 5224 if (notify_on_release(parent)) 5225 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); 5226 5227 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) 5228 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); 5229 5230 cgrp->self.serial_nr = css_serial_nr_next++; 5231 5232 /* allocation complete, commit to creation */ 5233 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children); 5234 atomic_inc(&root->nr_cgrps); 5235 cgroup_get_live(parent); 5236 5237 /* 5238 * @cgrp is now fully operational. If something fails after this 5239 * point, it'll be released via the normal destruction path. 5240 */ 5241 cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id); 5242 5243 /* 5244 * On the default hierarchy, a child doesn't automatically inherit 5245 * subtree_control from the parent. Each is configured manually. 5246 */ 5247 if (!cgroup_on_dfl(cgrp)) 5248 cgrp->subtree_control = cgroup_control(cgrp); 5249 5250 cgroup_propagate_control(cgrp); 5251 5252 return cgrp; 5253 5254 out_psi_free: 5255 psi_cgroup_free(cgrp); 5256 out_idr_free: 5257 cgroup_idr_remove(&root->cgroup_idr, cgrp->id); 5258 out_stat_exit: 5259 if (cgroup_on_dfl(parent)) 5260 cgroup_rstat_exit(cgrp); 5261 out_cancel_ref: 5262 percpu_ref_exit(&cgrp->self.refcnt); 5263 out_free_cgrp: 5264 kfree(cgrp); 5265 return ERR_PTR(ret); 5266 } 5267 5268 static bool cgroup_check_hierarchy_limits(struct cgroup *parent) 5269 { 5270 struct cgroup *cgroup; 5271 bool ret = false; 5272 int level = 1; 5273 5274 lockdep_assert_held(&cgroup_mutex); 5275 5276 for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) { 5277 if (cgroup->nr_descendants >= cgroup->max_descendants) 5278 goto fail; 5279 5280 if (level > cgroup->max_depth) 5281 goto fail; 5282 5283 level++; 5284 } 5285 5286 ret = true; 5287 fail: 5288 return ret; 5289 } 5290 5291 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) 5292 { 5293 struct cgroup *parent, *cgrp; 5294 struct kernfs_node *kn; 5295 int ret; 5296 5297 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */ 5298 if (strchr(name, '\n')) 5299 return -EINVAL; 5300 5301 parent = cgroup_kn_lock_live(parent_kn, false); 5302 if (!parent) 5303 return -ENODEV; 5304 5305 if (!cgroup_check_hierarchy_limits(parent)) { 5306 ret = -EAGAIN; 5307 goto out_unlock; 5308 } 5309 5310 cgrp = cgroup_create(parent); 5311 if (IS_ERR(cgrp)) { 5312 ret = PTR_ERR(cgrp); 5313 goto out_unlock; 5314 } 5315 5316 /* create the directory */ 5317 kn = kernfs_create_dir(parent->kn, name, mode, cgrp); 5318 if (IS_ERR(kn)) { 5319 ret = PTR_ERR(kn); 5320 goto out_destroy; 5321 } 5322 cgrp->kn = kn; 5323 5324 /* 5325 * This extra ref will be put in css_free_rwork_fn() and guarantees 5326 * that @cgrp->kn is always accessible.
5327 */ 5328 kernfs_get(kn); 5329 5330 ret = cgroup_kn_set_ugid(kn); 5331 if (ret) 5332 goto out_destroy; 5333 5334 ret = css_populate_dir(&cgrp->self); 5335 if (ret) 5336 goto out_destroy; 5337 5338 ret = cgroup_apply_control_enable(cgrp); 5339 if (ret) 5340 goto out_destroy; 5341 5342 TRACE_CGROUP_PATH(mkdir, cgrp); 5343 5344 /* let's create and online css's */ 5345 kernfs_activate(kn); 5346 5347 ret = 0; 5348 goto out_unlock; 5349 5350 out_destroy: 5351 cgroup_destroy_locked(cgrp); 5352 out_unlock: 5353 cgroup_kn_unlock(parent_kn); 5354 return ret; 5355 } 5356 5357 /* 5358 * This is called when the refcnt of a css is confirmed to be killed. 5359 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to 5360 * initiate destruction and put the css ref from kill_css(). 5361 */ 5362 static void css_killed_work_fn(struct work_struct *work) 5363 { 5364 struct cgroup_subsys_state *css = 5365 container_of(work, struct cgroup_subsys_state, destroy_work); 5366 5367 mutex_lock(&cgroup_mutex); 5368 5369 do { 5370 offline_css(css); 5371 css_put(css); 5372 /* @css can't go away while we're holding cgroup_mutex */ 5373 css = css->parent; 5374 } while (css && atomic_dec_and_test(&css->online_cnt)); 5375 5376 mutex_unlock(&cgroup_mutex); 5377 } 5378 5379 /* css kill confirmation processing requires process context, bounce */ 5380 static void css_killed_ref_fn(struct percpu_ref *ref) 5381 { 5382 struct cgroup_subsys_state *css = 5383 container_of(ref, struct cgroup_subsys_state, refcnt); 5384 5385 if (atomic_dec_and_test(&css->online_cnt)) { 5386 INIT_WORK(&css->destroy_work, css_killed_work_fn); 5387 queue_work(cgroup_destroy_wq, &css->destroy_work); 5388 } 5389 } 5390 5391 /** 5392 * kill_css - destroy a css 5393 * @css: css to destroy 5394 * 5395 * This function initiates destruction of @css by removing cgroup interface 5396 * files and putting its base reference. ->css_offline() will be invoked 5397 * asynchronously once css_tryget_online() is guaranteed to fail and when 5398 * the reference count reaches zero, @css will be released. 5399 */ 5400 static void kill_css(struct cgroup_subsys_state *css) 5401 { 5402 lockdep_assert_held(&cgroup_mutex); 5403 5404 if (css->flags & CSS_DYING) 5405 return; 5406 5407 css->flags |= CSS_DYING; 5408 5409 /* 5410 * This must happen before css is disassociated from its cgroup. 5411 * See seq_css() for details. 5412 */ 5413 css_clear_dir(css); 5414 5415 /* 5416 * Killing would put the base ref, but we need to keep it alive 5417 * until after ->css_offline(). 5418 */ 5419 css_get(css); 5420 5421 /* 5422 * cgroup core guarantees that, by the time ->css_offline() is 5423 * invoked, no new css reference will be given out via 5424 * css_tryget_online(). We can't simply call percpu_ref_kill() and 5425 * proceed to offlining css's because percpu_ref_kill() doesn't 5426 * guarantee that the ref is seen as killed on all CPUs on return. 5427 * 5428 * Use percpu_ref_kill_and_confirm() to get notifications as each 5429 * css is confirmed to be seen as killed on all CPUs. 5430 */ 5431 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn); 5432 } 5433 5434 /** 5435 * cgroup_destroy_locked - the first stage of cgroup destruction 5436 * @cgrp: cgroup to be destroyed 5437 * 5438 * css's make use of percpu refcnts whose killing latency shouldn't be 5439 * exposed to userland and are RCU protected. Also, cgroup core needs to 5440 * guarantee that css_tryget_online() won't succeed by the time 5441 * ->css_offline() is invoked.
To satisfy all the requirements, 5442 * destruction is implemented in the following two steps. 5443 * 5444 * s1. Verify @cgrp can be destroyed and mark it dying. Remove all 5445 * userland visible parts and start killing the percpu refcnts of 5446 * css's. Set up so that the next stage will be kicked off once all 5447 * the percpu refcnts are confirmed to be killed. 5448 * 5449 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the 5450 * rest of destruction. Once all cgroup references are gone, the 5451 * cgroup is RCU-freed. 5452 * 5453 * This function implements s1. After this step, @cgrp is gone as far as 5454 * the userland is concerned and a new cgroup with the same name may be 5455 * created. As cgroup doesn't care about the names internally, this 5456 * doesn't cause any problem. 5457 */ 5458 static int cgroup_destroy_locked(struct cgroup *cgrp) 5459 __releases(&cgroup_mutex) __acquires(&cgroup_mutex) 5460 { 5461 struct cgroup *tcgrp, *parent = cgroup_parent(cgrp); 5462 struct cgroup_subsys_state *css; 5463 struct cgrp_cset_link *link; 5464 int ssid; 5465 5466 lockdep_assert_held(&cgroup_mutex); 5467 5468 /* 5469 * Only migration can raise populated from zero and we're already 5470 * holding cgroup_mutex. 5471 */ 5472 if (cgroup_is_populated(cgrp)) 5473 return -EBUSY; 5474 5475 /* 5476 * Make sure there are no live children. We can't test emptiness of 5477 * ->self.children as dead children linger on it while being 5478 * drained; otherwise, "rmdir parent/child parent" may fail. 5479 */ 5480 if (css_has_online_children(&cgrp->self)) 5481 return -EBUSY; 5482 5483 /* 5484 * Mark @cgrp and the associated csets dead. The former prevents 5485 * further task migration and child creation by disabling 5486 * cgroup_lock_live_group(). The latter makes the csets ignored by 5487 * the migration path. 5488 */ 5489 cgrp->self.flags &= ~CSS_ONLINE; 5490 5491 spin_lock_irq(&css_set_lock); 5492 list_for_each_entry(link, &cgrp->cset_links, cset_link) 5493 link->cset->dead = true; 5494 spin_unlock_irq(&css_set_lock); 5495 5496 /* initiate massacre of all css's */ 5497 for_each_css(css, ssid, cgrp) 5498 kill_css(css); 5499 5500 /* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */ 5501 css_clear_dir(&cgrp->self); 5502 kernfs_remove(cgrp->kn); 5503 5504 if (parent && cgroup_is_threaded(cgrp)) 5505 parent->nr_threaded_children--; 5506 5507 spin_lock_irq(&css_set_lock); 5508 for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) { 5509 tcgrp->nr_descendants--; 5510 tcgrp->nr_dying_descendants++; 5511 /* 5512 * If the dying cgroup is frozen, decrease frozen descendants 5513 * counters of ancestor cgroups.
5514 */ 5515 if (test_bit(CGRP_FROZEN, &cgrp->flags)) 5516 tcgrp->freezer.nr_frozen_descendants--; 5517 } 5518 spin_unlock_irq(&css_set_lock); 5519 5520 cgroup1_check_for_release(parent); 5521 5522 /* put the base reference */ 5523 percpu_ref_kill(&cgrp->self.refcnt); 5524 5525 return 0; 5526 }; 5527 5528 int cgroup_rmdir(struct kernfs_node *kn) 5529 { 5530 struct cgroup *cgrp; 5531 int ret = 0; 5532 5533 cgrp = cgroup_kn_lock_live(kn, false); 5534 if (!cgrp) 5535 return 0; 5536 5537 ret = cgroup_destroy_locked(cgrp); 5538 if (!ret) 5539 TRACE_CGROUP_PATH(rmdir, cgrp); 5540 5541 cgroup_kn_unlock(kn); 5542 return ret; 5543 } 5544 5545 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { 5546 .show_options = cgroup_show_options, 5547 .mkdir = cgroup_mkdir, 5548 .rmdir = cgroup_rmdir, 5549 .show_path = cgroup_show_path, 5550 }; 5551 5552 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early) 5553 { 5554 struct cgroup_subsys_state *css; 5555 5556 pr_debug("Initializing cgroup subsys %s\n", ss->name); 5557 5558 mutex_lock(&cgroup_mutex); 5559 5560 idr_init(&ss->css_idr); 5561 INIT_LIST_HEAD(&ss->cfts); 5562 5563 /* Create the root cgroup state for this subsystem */ 5564 ss->root = &cgrp_dfl_root; 5565 css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); 5566 /* We don't handle early failures gracefully */ 5567 BUG_ON(IS_ERR(css)); 5568 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp); 5569 5570 /* 5571 * Root csses are never destroyed and we can't initialize 5572 * percpu_ref during early init. Disable refcnting. 5573 */ 5574 css->flags |= CSS_NO_REF; 5575 5576 if (early) { 5577 /* allocation can't be done safely during early init */ 5578 css->id = 1; 5579 } else { 5580 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL); 5581 BUG_ON(css->id < 0); 5582 } 5583 5584 /* Update the init_css_set to contain a subsys 5585 * pointer to this state - since the subsystem is 5586 * newly registered, all tasks and hence the 5587 * init_css_set is in the subsystem's root cgroup. */ 5588 init_css_set.subsys[ss->id] = css; 5589 5590 have_fork_callback |= (bool)ss->fork << ss->id; 5591 have_exit_callback |= (bool)ss->exit << ss->id; 5592 have_release_callback |= (bool)ss->release << ss->id; 5593 have_canfork_callback |= (bool)ss->can_fork << ss->id; 5594 5595 /* At system boot, before all subsystems have been 5596 * registered, no tasks have been forked, so we don't 5597 * need to invoke fork callbacks here. */ 5598 BUG_ON(!list_empty(&init_task.tasks)); 5599 5600 BUG_ON(online_css(css)); 5601 5602 mutex_unlock(&cgroup_mutex); 5603 } 5604 5605 /** 5606 * cgroup_init_early - cgroup initialization at system boot 5607 * 5608 * Initialize cgroups at system boot, and initialize any 5609 * subsystems that request early init. 
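 *
 * Subsystems with ->early_init set are brought up here with a
 * placeholder css id of 1 (see cgroup_init_subsys()); cgroup_init()
 * re-allocates the id properly once the idr machinery is usable.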
5610 */ 5611 int __init cgroup_init_early(void) 5612 { 5613 static struct cgroup_fs_context __initdata ctx; 5614 struct cgroup_subsys *ss; 5615 int i; 5616 5617 ctx.root = &cgrp_dfl_root; 5618 init_cgroup_root(&ctx); 5619 cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF; 5620 5621 RCU_INIT_POINTER(init_task.cgroups, &init_css_set); 5622 5623 for_each_subsys(ss, i) { 5624 WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, 5625 "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n", 5626 i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, 5627 ss->id, ss->name); 5628 WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, 5629 "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); 5630 5631 ss->id = i; 5632 ss->name = cgroup_subsys_name[i]; 5633 if (!ss->legacy_name) 5634 ss->legacy_name = cgroup_subsys_name[i]; 5635 5636 if (ss->early_init) 5637 cgroup_init_subsys(ss, true); 5638 } 5639 return 0; 5640 } 5641 5642 static u16 cgroup_disable_mask __initdata; 5643 5644 /** 5645 * cgroup_init - cgroup initialization 5646 * 5647 * Register cgroup filesystem and /proc file, and initialize 5648 * any subsystems that didn't request early init. 5649 */ 5650 int __init cgroup_init(void) 5651 { 5652 struct cgroup_subsys *ss; 5653 int ssid; 5654 5655 BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16); 5656 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem)); 5657 BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); 5658 BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files)); 5659 5660 cgroup_rstat_boot(); 5661 5662 /* 5663 * The latency of the synchronize_rcu() is too high for cgroups, 5664 * avoid it at the cost of forcing all readers into the slow path. 5665 */ 5666 rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss); 5667 5668 get_user_ns(init_cgroup_ns.user_ns); 5669 5670 mutex_lock(&cgroup_mutex); 5671 5672 /* 5673 * Add init_css_set to the hash table so that dfl_root can link to 5674 * it during init. 5675 */ 5676 hash_add(css_set_table, &init_css_set.hlist, 5677 css_set_hash(init_css_set.subsys)); 5678 5679 BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); 5680 5681 mutex_unlock(&cgroup_mutex); 5682 5683 for_each_subsys(ss, ssid) { 5684 if (ss->early_init) { 5685 struct cgroup_subsys_state *css = 5686 init_css_set.subsys[ss->id]; 5687 5688 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, 5689 GFP_KERNEL); 5690 BUG_ON(css->id < 0); 5691 } else { 5692 cgroup_init_subsys(ss, false); 5693 } 5694 5695 list_add_tail(&init_css_set.e_cset_node[ssid], 5696 &cgrp_dfl_root.cgrp.e_csets[ssid]); 5697 5698 /* 5699 * Setting dfl_root subsys_mask needs to consider the 5700 * disabled flag and cftype registration needs kmalloc, 5701 * both of which aren't available during early_init. 
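 * (cgroup_disable_mask is populated from the "cgroup_disable=" boot
 * parameter; e.g. booting with cgroup_disable=memory takes the branch
 * below for the memory controller.)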
5702 */ 5703 if (cgroup_disable_mask & (1 << ssid)) { 5704 static_branch_disable(cgroup_subsys_enabled_key[ssid]); 5705 printk(KERN_INFO "Disabling %s control group subsystem\n", 5706 ss->name); 5707 continue; 5708 } 5709 5710 if (cgroup1_ssid_disabled(ssid)) 5711 printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n", 5712 ss->name); 5713 5714 cgrp_dfl_root.subsys_mask |= 1 << ss->id; 5715 5716 /* implicit controllers must be threaded too */ 5717 WARN_ON(ss->implicit_on_dfl && !ss->threaded); 5718 5719 if (ss->implicit_on_dfl) 5720 cgrp_dfl_implicit_ss_mask |= 1 << ss->id; 5721 else if (!ss->dfl_cftypes) 5722 cgrp_dfl_inhibit_ss_mask |= 1 << ss->id; 5723 5724 if (ss->threaded) 5725 cgrp_dfl_threaded_ss_mask |= 1 << ss->id; 5726 5727 if (ss->dfl_cftypes == ss->legacy_cftypes) { 5728 WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes)); 5729 } else { 5730 WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes)); 5731 WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes)); 5732 } 5733 5734 if (ss->bind) 5735 ss->bind(init_css_set.subsys[ssid]); 5736 5737 mutex_lock(&cgroup_mutex); 5738 css_populate_dir(init_css_set.subsys[ssid]); 5739 mutex_unlock(&cgroup_mutex); 5740 } 5741 5742 /* init_css_set.subsys[] has been updated, re-hash */ 5743 hash_del(&init_css_set.hlist); 5744 hash_add(css_set_table, &init_css_set.hlist, 5745 css_set_hash(init_css_set.subsys)); 5746 5747 WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup")); 5748 WARN_ON(register_filesystem(&cgroup_fs_type)); 5749 WARN_ON(register_filesystem(&cgroup2_fs_type)); 5750 WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show)); 5751 5752 return 0; 5753 } 5754 5755 static int __init cgroup_wq_init(void) 5756 { 5757 /* 5758 * There isn't much point in executing destruction path in 5759 * parallel. Good chunk is serialized with cgroup_mutex anyway. 5760 * Use 1 for @max_active. 5761 * 5762 * We would prefer to do this in cgroup_init() above, but that 5763 * is called before init_workqueues(): so leave this until after. 5764 */ 5765 cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); 5766 BUG_ON(!cgroup_destroy_wq); 5767 return 0; 5768 } 5769 core_initcall(cgroup_wq_init); 5770 5771 void cgroup_path_from_kernfs_id(const union kernfs_node_id *id, 5772 char *buf, size_t buflen) 5773 { 5774 struct kernfs_node *kn; 5775 5776 kn = kernfs_get_node_by_id(cgrp_dfl_root.kf_root, id); 5777 if (!kn) 5778 return; 5779 kernfs_path(kn, buf, buflen); 5780 kernfs_put(kn); 5781 } 5782 5783 /* 5784 * proc_cgroup_show() 5785 * - Print task's cgroup paths into seq_file, one line for each hierarchy 5786 * - Used for /proc/<pid>/cgroup. 5787 */ 5788 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, 5789 struct pid *pid, struct task_struct *tsk) 5790 { 5791 char *buf; 5792 int retval; 5793 struct cgroup_root *root; 5794 5795 retval = -ENOMEM; 5796 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5797 if (!buf) 5798 goto out; 5799 5800 mutex_lock(&cgroup_mutex); 5801 spin_lock_irq(&css_set_lock); 5802 5803 for_each_root(root) { 5804 struct cgroup_subsys *ss; 5805 struct cgroup *cgrp; 5806 int ssid, count = 0; 5807 5808 if (root == &cgrp_dfl_root && !cgrp_dfl_visible) 5809 continue; 5810 5811 seq_printf(m, "%d:", root->hierarchy_id); 5812 if (root != &cgrp_dfl_root) 5813 for_each_subsys(ss, ssid) 5814 if (root->subsys_mask & (1 << ssid)) 5815 seq_printf(m, "%s%s", count++ ? "," : "", 5816 ss->legacy_name); 5817 if (strlen(root->name)) 5818 seq_printf(m, "%sname=%s", count ? 
"," : "", 5819 root->name); 5820 seq_putc(m, ':'); 5821 5822 cgrp = task_cgroup_from_root(tsk, root); 5823 5824 /* 5825 * On traditional hierarchies, all zombie tasks show up as 5826 * belonging to the root cgroup. On the default hierarchy, 5827 * while a zombie doesn't show up in "cgroup.procs" and 5828 * thus can't be migrated, its /proc/PID/cgroup keeps 5829 * reporting the cgroup it belonged to before exiting. If 5830 * the cgroup is removed before the zombie is reaped, 5831 * " (deleted)" is appended to the cgroup path. 5832 */ 5833 if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) { 5834 retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX, 5835 current->nsproxy->cgroup_ns); 5836 if (retval >= PATH_MAX) 5837 retval = -ENAMETOOLONG; 5838 if (retval < 0) 5839 goto out_unlock; 5840 5841 seq_puts(m, buf); 5842 } else { 5843 seq_puts(m, "/"); 5844 } 5845 5846 if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp)) 5847 seq_puts(m, " (deleted)\n"); 5848 else 5849 seq_putc(m, '\n'); 5850 } 5851 5852 retval = 0; 5853 out_unlock: 5854 spin_unlock_irq(&css_set_lock); 5855 mutex_unlock(&cgroup_mutex); 5856 kfree(buf); 5857 out: 5858 return retval; 5859 } 5860 5861 /** 5862 * cgroup_fork - initialize cgroup related fields during copy_process() 5863 * @child: pointer to task_struct of forking parent process. 5864 * 5865 * A task is associated with the init_css_set until cgroup_post_fork() 5866 * attaches it to the parent's css_set. Empty cg_list indicates that 5867 * @child isn't holding reference to its css_set. 5868 */ 5869 void cgroup_fork(struct task_struct *child) 5870 { 5871 RCU_INIT_POINTER(child->cgroups, &init_css_set); 5872 INIT_LIST_HEAD(&child->cg_list); 5873 } 5874 5875 /** 5876 * cgroup_can_fork - called on a new task before the process is exposed 5877 * @child: the task in question. 5878 * 5879 * This calls the subsystem can_fork() callbacks. If the can_fork() callback 5880 * returns an error, the fork aborts with that error code. This allows for 5881 * a cgroup subsystem to conditionally allow or deny new forks. 5882 */ 5883 int cgroup_can_fork(struct task_struct *child) 5884 { 5885 struct cgroup_subsys *ss; 5886 int i, j, ret; 5887 5888 do_each_subsys_mask(ss, i, have_canfork_callback) { 5889 ret = ss->can_fork(child); 5890 if (ret) 5891 goto out_revert; 5892 } while_each_subsys_mask(); 5893 5894 return 0; 5895 5896 out_revert: 5897 for_each_subsys(ss, j) { 5898 if (j >= i) 5899 break; 5900 if (ss->cancel_fork) 5901 ss->cancel_fork(child); 5902 } 5903 5904 return ret; 5905 } 5906 5907 /** 5908 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork() 5909 * @child: the task in question 5910 * 5911 * This calls the cancel_fork() callbacks if a fork failed *after* 5912 * cgroup_can_fork() succeded. 5913 */ 5914 void cgroup_cancel_fork(struct task_struct *child) 5915 { 5916 struct cgroup_subsys *ss; 5917 int i; 5918 5919 for_each_subsys(ss, i) 5920 if (ss->cancel_fork) 5921 ss->cancel_fork(child); 5922 } 5923 5924 /** 5925 * cgroup_post_fork - called on a new task after adding it to the task list 5926 * @child: the task in question 5927 * 5928 * Adds the task to the list running through its css_set if necessary and 5929 * call the subsystem fork() callbacks. Has to be after the task is 5930 * visible on the task list in case we race with the first call to 5931 * cgroup_task_iter_start() - to guarantee that the new task ends up on its 5932 * list. 
/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child (forked) process
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the parent's css_set.  An empty cg_list indicates that
 * @child isn't holding a reference to its css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_can_fork - called on a new task before the process is exposed
 * @child: the task in question
 *
 * This calls the subsystem can_fork() callbacks.  If a can_fork()
 * callback returns an error, the fork aborts with that error code.  This
 * allows a cgroup subsystem to conditionally allow or deny new forks.
 */
int cgroup_can_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i, j, ret;

	do_each_subsys_mask(ss, i, have_canfork_callback) {
		ret = ss->can_fork(child);
		if (ret)
			goto out_revert;
	} while_each_subsys_mask();

	return 0;

out_revert:
	for_each_subsys(ss, j) {
		if (j >= i)
			break;
		if (ss->cancel_fork)
			ss->cancel_fork(child);
	}

	return ret;
}

/**
 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
 * @child: the task in question
 *
 * This calls the cancel_fork() callbacks if a fork failed *after*
 * cgroup_can_fork() succeeded.
 */
void cgroup_cancel_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		if (ss->cancel_fork)
			ss->cancel_fork(child);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * css_task_iter_start() - to guarantee that the new task ends up on its
 * list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * This may race against cgroup_enable_task_cg_lists().  As that
	 * function sets use_task_css_set_links before grabbing
	 * tasklist_lock and we just went through tasklist_lock to add
	 * @child, it's guaranteed that either we see the set
	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
	 * @child during its iteration.
	 *
	 * If we won the race, @child is associated with %current's
	 * css_set.  Grabbing css_set_lock guarantees both that the
	 * association is stable, and, on completion of the parent's
	 * migration, @child is visible in the source of migration or
	 * already in the destination cgroup.  This guarantee is necessary
	 * when implementing operations which need to migrate all tasks of
	 * a cgroup to another.
	 *
	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
	 * will remain in init_css_set.  This is safe because all tasks are
	 * in the init_css_set before cg_links is enabled and there's no
	 * operation which transfers all tasks out of init_css_set.
	 */
	if (use_task_css_set_links) {
		struct css_set *cset;

		spin_lock_irq(&css_set_lock);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
			get_css_set(cset);
			cset->nr_tasks++;
			css_set_move_task(child, NULL, cset, false);
		}

		/*
		 * If the cgroup has to be frozen, the new task has to be
		 * frozen too.  Set the JOBCTL_TRAP_FREEZE jobctl bit to
		 * get the task into the frozen state.
		 */
		if (unlikely(cgroup_task_freeze(child))) {
			spin_lock(&child->sighand->siglock);
			WARN_ON_ONCE(child->frozen);
			child->jobctl |= JOBCTL_TRAP_FREEZE;
			spin_unlock(&child->sighand->siglock);

			/*
			 * Calling cgroup_update_frozen() isn't required
			 * here, because it will be called anyway a bit
			 * later from do_freezer_trap().  So we avoid
			 * cgroup's transient switch from the frozen state
			 * and back.
			 */
		}

		spin_unlock_irq(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	do_each_subsys_mask(ss, i, have_fork_callback) {
		ss->fork(child);
	} while_each_subsys_mask();
}
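/*
 * Illustrative sketch - loosely modelled on the pids controller, all
 * names below are hypothetical.  A ->can_fork() callback charges a
 * limit and may veto the fork; ->cancel_fork() must undo exactly that
 * charge, since cgroup_can_fork() above rolls back every earlier
 * subsystem when a later one fails, and cgroup_cancel_fork() rolls
 * back all of them when copy_process() itself fails.
 *
 *	static int example_can_fork(struct task_struct *task)
 *	{
 *		return example_try_charge(task) ? 0 : -EAGAIN;
 *	}
 *
 *	static void example_cancel_fork(struct task_struct *task)
 *	{
 *		example_uncharge(task);
 *	}
 */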
/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with the migration path - PF_EXITING is visible to the migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink @tsk from its css_set.  As the migration path can't race
	 * with us, we can check css_set and cg_list without synchronization.
	 */
	cset = task_css_set(tsk);

	if (!list_empty(&tsk->cg_list)) {
		spin_lock_irq(&css_set_lock);
		css_set_move_task(tsk, cset, NULL, false);
		cset->nr_tasks--;

		WARN_ON_ONCE(cgroup_task_frozen(tsk));
		if (unlikely(cgroup_task_freeze(tsk)))
			cgroup_update_frozen(task_dfl_cgroup(tsk));

		spin_unlock_irq(&css_set_lock);
	} else {
		get_css_set(cset);
	}

	/* see cgroup_post_fork() for details */
	do_each_subsys_mask(ss, i, have_exit_callback) {
		ss->exit(tsk);
	} while_each_subsys_mask();
}

void cgroup_release(struct task_struct *task)
{
	struct cgroup_subsys *ss;
	int ssid;

	do_each_subsys_mask(ss, ssid, have_release_callback) {
		ss->release(task);
	} while_each_subsys_mask();
}

void cgroup_free(struct task_struct *task)
{
	struct css_set *cset = task_css_set(task);

	put_css_set(cset);
}

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;
			cgroup_disable_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);

void __init __weak enable_debug_cgroup(void) { }

static int __init enable_cgroup_debug(char *str)
{
	cgroup_debug = true;
	enable_debug_cgroup();
	return 1;
}
__setup("cgroup_debug", enable_cgroup_debug);
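/*
 * Example kernel command line usage (values illustrative).  Tokens are
 * matched against both the v2 name and the v1 legacy name of each
 * subsystem, so either of the following disables the block IO
 * controller:
 *
 *	cgroup_disable=io
 *	cgroup_disable=blkio
 *
 * and bare "cgroup_debug" turns on cgroup_debug.
 */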
/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct file_system_type *s_type = dentry->d_sb->s_type;
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	/* is @dentry a cgroup dir? */
	if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
	    !kn || kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}

/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it.  Returns a pointer to the found cgroup
 * on success, ERR_PTR(-ENOENT) if @path doesn't exist and
 * ERR_PTR(-ENOTDIR) if @path points to a non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp;

	mutex_lock(&cgroup_mutex);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (kn) {
		if (kernfs_type(kn) == KERNFS_DIR) {
			cgrp = kn->priv;
			cgroup_get_live(cgrp);
		} else {
			cgrp = ERR_PTR(-ENOTDIR);
		}
		kernfs_put(kn);
	} else {
		cgrp = ERR_PTR(-ENOENT);
	}

	mutex_unlock(&cgroup_mutex);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

/**
 * cgroup_get_from_fd - get a cgroup pointer from a fd
 * @fd: fd obtained by open(cgroup2_dir)
 *
 * Find the cgroup from a fd which should be obtained
 * by opening a cgroup directory.  Returns a pointer to the
 * cgroup on success.  ERR_PTR is returned if the cgroup
 * cannot be found.
 */
struct cgroup *cgroup_get_from_fd(int fd)
{
	struct cgroup_subsys_state *css;
	struct cgroup *cgrp;
	struct file *f;

	f = fget_raw(fd);
	if (!f)
		return ERR_PTR(-EBADF);

	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
	fput(f);
	if (IS_ERR(css))
		return ERR_CAST(css);

	cgrp = css->cgroup;
	if (!cgroup_on_dfl(cgrp)) {
		cgroup_put(cgrp);
		return ERR_PTR(-EBADF);
	}

	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
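/*
 * Usage sketch - hypothetical caller, the path is illustrative.  Both
 * helpers above return the cgroup with a reference held, which the
 * caller must drop with cgroup_put():
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/my/group");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	... operate on @cgrp ...
 *	cgroup_put(cgrp);
 */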
/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)

DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;

void cgroup_sk_alloc_disable(void)
{
	if (cgroup_sk_alloc_disabled)
		return;
	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
	cgroup_sk_alloc_disabled = true;
}

#else

#define cgroup_sk_alloc_disabled	false

#endif

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
	if (cgroup_sk_alloc_disabled)
		return;

	/* Socket clone path */
	if (skcd->val) {
		/*
		 * We might be cloning a socket which is left in an empty
		 * cgroup and the cgroup might have already been rmdir'd.
		 * Don't use cgroup_get_live().
		 */
		cgroup_get(sock_cgroup_ptr(skcd));
		return;
	}

	rcu_read_lock();

	while (true) {
		struct css_set *cset;

		cset = task_css_set(current);
		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			skcd->val = (unsigned long)cset->dfl_cgrp;
			break;
		}
		cpu_relax();
	}

	rcu_read_unlock();
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
	cgroup_put(sock_cgroup_ptr(skcd));
}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUP_BPF
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
#endif	/* CONFIG_CGROUP_BPF */

#ifdef CONFIG_SYSFS
static ssize_t show_delegatable_files(struct cftype *files, char *buf,
				      ssize_t size, const char *prefix)
{
	struct cftype *cft;
	ssize_t ret = 0;

	for (cft = files; cft && cft->name[0] != '\0'; cft++) {
		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
			continue;

		if (prefix)
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);

		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);

		if (WARN_ON(ret >= size))
			break;
	}

	return ret;
}

static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	struct cgroup_subsys *ss;
	int ssid;
	ssize_t ret = 0;

	ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret,
				     NULL);

	for_each_subsys(ss, ssid)
		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
					      PAGE_SIZE - ret,
					      cgroup_subsys_name[ssid]);

	return ret;
}
static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);

static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "nsdelegate\nmemory_localevents\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);

static struct attribute *cgroup_sysfs_attrs[] = {
	&cgroup_delegate_attr.attr,
	&cgroup_features_attr.attr,
	NULL,
};

static const struct attribute_group cgroup_sysfs_attr_group = {
	.attrs = cgroup_sysfs_attrs,
	.name = "cgroup",
};

static int __init cgroup_sysfs_init(void)
{
	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
}
subsys_initcall(cgroup_sysfs_init);
#endif	/* CONFIG_SYSFS */
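/*
 * Example contents of the files created above under /sys/kernel/cgroup/
 * (the delegate list grows with each enabled controller's delegatable
 * files; only the base files are shown here):
 *
 *	# cat /sys/kernel/cgroup/features
 *	nsdelegate
 *	memory_localevents
 *
 *	# cat /sys/kernel/cgroup/delegate
 *	cgroup.procs
 *	cgroup.threads
 *	cgroup.subtree_control
 */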