/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "cgroup-internal.h"

#include <linux/bpf-cgroup.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/magic.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/hashtable.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
#include <linux/psi.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>

#define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
					 MAX_CFTYPE_NAME + 2)
/* let's not notify more than 100 times per second */
#define CGROUP_FILE_NOTIFY_MIN_INTV	DIV_ROUND_UP(HZ, 100)

/*
 * To avoid confusing the compiler (and generating warnings) with code
 * that attempts to access what would be a 0-element array (i.e. sized
 * to a potentially empty array when CGROUP_SUBSYS_COUNT == 0), this
 * constant expression can be added.
 */
#define CGROUP_HAS_SUBSYS_CONFIG	(CGROUP_SUBSYS_COUNT > 0)

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);

#ifdef CONFIG_PROVE_RCU
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#endif

DEFINE_SPINLOCK(trace_cgroup_path_lock);
char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
static bool cgroup_debug __read_mostly;

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);
/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);

#define cgroup_assert_mutex_or_rcu_locked()				\
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
			 !lockdep_is_held(&cgroup_mutex),		\
			 "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)							\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);	\
	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);		\
	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS
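/*
 * Illustrative sketch (not part of the build): each SUBSYS() block above
 * is an x-macro expanded once per entry of <linux/cgroup_subsys.h>.  For a
 * hypothetical "SUBSYS(foo)" entry, the expansions would emit roughly:
 *
 *	[foo_cgrp_id] = &foo_cgrp_subsys,		(pointer array)
 *	[foo_cgrp_id] = "foo",				(name array)
 *	DEFINE_STATIC_KEY_TRUE(foo_cgrp_subsys_enabled_key);
 *	DEFINE_STATIC_KEY_TRUE(foo_cgrp_subsys_on_dfl_key);
 *
 * so a controller added to cgroup_subsys.h shows up in every table here
 * without further edits to this file.
 */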
static DEFINE_PER_CPU(struct cgroup_rstat_cpu, cgrp_dfl_root_rstat_cpu);

/* the default hierarchy */
struct cgroup_root cgrp_dfl_root = { .cgrp.rstat_cpu = &cgrp_dfl_root_rstat_cpu };
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static u16 cgrp_dfl_implicit_ss_mask;

/* some controllers can be threaded on the default hierarchy */
static u16 cgrp_dfl_threaded_ss_mask;

/* The list of hierarchy roots */
LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmasks identify subsystems with specific features to avoid
 * having to do iterative checks repeatedly.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_release_callback __read_mostly;
static u16 have_canfork_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
	.ns.count	= REFCOUNT_INIT(2),
	.user_ns	= &init_user_ns,
	.ns.ops		= &cgroupns_operations,
	.ns.inum	= PROC_CGROUP_INIT_INO,
	.root_cset	= &init_css_set,
};

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_base_files[];

/* cgroup optional features */
enum cgroup_opt_features {
#ifdef CONFIG_PSI
	OPT_FEATURE_PRESSURE,
#endif
	OPT_FEATURE_COUNT
};

static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
#ifdef CONFIG_PSI
	"pressure",
#endif
};

static u16 cgroup_feature_disable_mask __read_mostly;

static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_skip(struct css_task_iter *it,
			       struct task_struct *task);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
			      struct cgroup *cgrp, struct cftype cfts[],
			      bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
bool cgroup_ssid_enabled(int ssid)
{
	if (!CGROUP_HAS_SUBSYS_CONFIG)
		return false;

	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}
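/*
 * Illustrative sketch (not part of the build): when only a numeric
 * subsystem ID is at hand, cgroup_ssid_enabled() is the test to use;
 * code which knows the literal subsystem name can use the faster
 * static-branch based cgroup_subsys_enabled() instead.  Assuming a
 * hypothetical "foo" controller:
 *
 *	if (cgroup_subsys_enabled(foo_cgrp_subsys))
 *		...;				// static branch, no lookup
 *
 *	for_each_subsys(ss, ssid)
 *		if (cgroup_ssid_enabled(ssid))	// indexed, works in core
 *			...;
 */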
/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in-between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
bool cgroup_on_dfl(const struct cgroup *cgrp)
{
	return cgrp->root == &cgrp_dfl_root;
}
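/*
 * Illustrative sketch (not part of the build): the typical way a
 * controller branches on the interface version, as css_populate_dir()
 * below does when picking base files.  "foo_refresh_limits" is a
 * hypothetical subsystem helper:
 *
 *	static void foo_refresh_limits(struct cgroup_subsys_state *css)
 *	{
 *		if (cgroup_on_dfl(css->cgroup)) {
 *			// v2 semantics
 *		} else {
 *			// v1-compatible semantics
 *		}
 *	}
 */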
/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
			    gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);
	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
	spin_unlock_bh(&cgroup_idr_lock);
	idr_preload_end();
	return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
	void *ret;

	spin_lock_bh(&cgroup_idr_lock);
	ret = idr_replace(idr, ptr, id);
	spin_unlock_bh(&cgroup_idr_lock);
	return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
	spin_lock_bh(&cgroup_idr_lock);
	idr_remove(idr, id);
	spin_unlock_bh(&cgroup_idr_lock);
}

static bool cgroup_has_tasks(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets;
}

bool cgroup_is_threaded(struct cgroup *cgrp)
{
	return cgrp->dom_cgrp != cgrp;
}

/* can @cgrp host both domain and threaded children? */
static bool cgroup_is_mixable(struct cgroup *cgrp)
{
	/*
	 * Root isn't under domain level resource control exempting it from
	 * the no-internal-process constraint, so it can serve as a thread
	 * root and a parent of resource domains at the same time.
	 */
	return !cgroup_parent(cgrp);
}

/* can @cgrp become a thread root? Should always be true for a thread root */
static bool cgroup_can_be_thread_root(struct cgroup *cgrp)
{
	/* mixables don't care */
	if (cgroup_is_mixable(cgrp))
		return true;

	/* domain roots can't be nested under threaded */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* can only have either domain or threaded children */
	if (cgrp->nr_populated_domain_children)
		return false;

	/* and no domain controllers can be enabled */
	if (cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask)
		return false;

	return true;
}

/* is @cgrp root of a threaded subtree? */
bool cgroup_is_thread_root(struct cgroup *cgrp)
{
	/* thread root should be a domain */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* a domain w/ threaded children is a thread root */
	if (cgrp->nr_threaded_children)
		return true;

	/*
	 * A domain which has tasks and explicit threaded controllers
	 * enabled is a thread root.
	 */
	if (cgroup_has_tasks(cgrp) &&
	    (cgrp->subtree_control & cgrp_dfl_threaded_ss_mask))
		return true;

	return false;
}

/* a domain which isn't connected to the root w/o breakage can't be used */
static bool cgroup_is_valid_domain(struct cgroup *cgrp)
{
	/* the cgroup itself can be a thread root */
	if (cgroup_is_threaded(cgrp))
		return false;

	/* but the ancestors can't be unless mixable */
	while ((cgrp = cgroup_parent(cgrp))) {
		if (!cgroup_is_mixable(cgrp) && cgroup_is_thread_root(cgrp))
			return false;
		if (cgroup_is_threaded(cgrp))
			return false;
	}

	return true;
}

/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	u16 root_ss_mask = cgrp->root->subsys_mask;

	if (parent) {
		u16 ss_mask = parent->subtree_control;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	if (cgroup_on_dfl(cgrp))
		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
				  cgrp_dfl_implicit_ss_mask);
	return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
	struct cgroup *parent = cgroup_parent(cgrp);

	if (parent) {
		u16 ss_mask = parent->subtree_ss_mask;

		/* threaded cgroups can only have threaded controllers */
		if (cgroup_is_threaded(cgrp))
			ss_mask &= cgrp_dfl_threaded_ss_mask;
		return ss_mask;
	}

	return cgrp->root->subsys_mask;
}
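/*
 * Illustrative sketch (not part of the build): a threaded subtree on
 * the default hierarchy, under the rules checked above.
 *
 *	A  (domain, thread root: nr_threaded_children == 2)
 *	+- T1  (threaded: dom_cgrp == A)
 *	+- T2  (threaded: dom_cgrp == A)
 *
 * cgroup_is_thread_root(A) and cgroup_is_threaded(T1) are both true,
 * and cgroup_control(T1) is A->subtree_control masked down to
 * cgrp_dfl_threaded_ss_mask, so T1/T2 can only enable threaded
 * controllers (e.g. cpu or pids).
 */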
/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	if (CGROUP_HAS_SUBSYS_CONFIG && ss)
		return rcu_dereference_check(cgrp->subsys[ss->id],
					lockdep_is_held(&cgroup_mutex));
	else
		return &cgrp->self;
}

/**
 * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get @cgrp's css associated with @ss.  If the css doesn't exist
 * or is offline, %NULL is returned.
 */
static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
						     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (css && !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();

	return css;
}

/**
 * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return a non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
							struct cgroup_subsys *ss)
{
	lockdep_assert_held(&cgroup_mutex);

	if (!ss)
		return &cgrp->self;

	/*
	 * This function is used while updating css associations and thus
	 * can't test the csses directly.  Test ss_mask.
	 */
	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
		cgrp = cgroup_parent(cgrp);
		if (!cgrp)
			return NULL;
	}

	return cgroup_css(cgrp, ss);
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 *
 * The returned css is not guaranteed to be online, and therefore it is the
 * caller's responsibility to try to get a reference for it.
 */
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
					 struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	if (!CGROUP_HAS_SUBSYS_CONFIG)
		return NULL;

	do {
		css = cgroup_css(cgrp, ss);

		if (css)
			return css;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	return init_css_set.subsys[ss->id];
}

/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
					     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	if (!CGROUP_HAS_SUBSYS_CONFIG)
		return NULL;

	rcu_read_lock();

	do {
		css = cgroup_css(cgrp, ss);

		if (css && css_tryget_online(css))
			goto out_unlock;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);

	css = init_css_set.subsys[ss->id];
	css_get(css);
out_unlock:
	rcu_read_unlock();
	return css;
}
EXPORT_SYMBOL_GPL(cgroup_get_e_css);
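/*
 * Illustrative sketch (not part of the build): the get/use/put pairing
 * expected by cgroup_get_e_css(), here against a hypothetical "foo"
 * controller:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &foo_cgrp_subsys);
 *	if (css) {
 *		...			// css is pinned and safe to use
 *		css_put(css);		// drop the reference taken above
 *	}
 */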
static void cgroup_get_live(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	css_get(&cgrp->self);
}

/**
 * __cgroup_task_count - count the number of tasks in a cgroup. The caller
 * is responsible for taking the css_set_lock.
 * @cgrp: the cgroup in question
 */
int __cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;

	return count;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count;

	spin_lock_irq(&css_set_lock);
	count = __cgroup_task_count(cgrp);
	spin_unlock_irq(&css_set_lock);

	return count;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
	struct cgroup *cgrp = of->kn->parent->priv;
	struct cftype *cft = of_cft(of);

	/*
	 * This is an open and unprotected implementation of cgroup_css().
	 * seq_css() is only called from a kernfs file operation which has
	 * an active reference on the file.  Because all the subsystem
	 * files are drained before a css is disassociated from a cgroup,
	 * the matching css from the cgroup's subsys table is guaranteed to
	 * be and stay valid until the enclosing operation is complete.
	 */
	if (CGROUP_HAS_SUBSYS_CONFIG && cft->ss)
		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
	else
		return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
		if (!((css) = rcu_dereference_check(			\
				(cgrp)->subsys[(ssid)],			\
				lockdep_is_held(&cgroup_mutex)))) { }	\
		else

/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)					    \
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	    \
		if (!((css) = cgroup_e_css_by_mask(cgrp,		    \
						   cgroup_subsys[(ssid)]))) \
			;						    \
		else
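/*
 * Illustrative sketch (not part of the build): walking every enabled
 * css of a cgroup under cgroup_mutex, the same shape
 * cgroup_destroy_locked() uses further down to kill each css:
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	lockdep_assert_held(&cgroup_mutex);
 *	for_each_css(css, ssid, cgrp)
 *		kill_css(css);
 */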
/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
	unsigned long __ss_mask = (ss_mask);				\
	if (!CGROUP_HAS_SUBSYS_CONFIG) {				\
		(ssid) = 0;						\
		break;							\
	}								\
	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
		(ss) = cgroup_subsys[ssid];				\
		{

#define while_each_subsys_mask()					\
		}							\
	}								\
} while (false)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)				\
	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       cgroup_is_dead(child); }))			\
			;						\
		else

/* walk live descendants in pre order */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)	\
	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
		if (({ lockdep_assert_held(&cgroup_mutex);		\
		       (dsct) = (d_css)->cgroup;			\
		       cgroup_is_dead(dsct); }))			\
			;						\
		else

/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted.  It contains a pointer to the root state
 * for each subsystem.  Also used to anchor the list of css_sets.  Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
	.refcount		= REFCOUNT_INIT(1),
	.dom_cset		= &init_css_set,
	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
	.dying_tasks		= LIST_HEAD_INIT(init_css_set.dying_tasks),
	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
	.threaded_csets		= LIST_HEAD_INIT(init_css_set.threaded_csets),
	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
	.mg_src_preload_node	= LIST_HEAD_INIT(init_css_set.mg_src_preload_node),
	.mg_dst_preload_node	= LIST_HEAD_INIT(init_css_set.mg_dst_preload_node),
	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),

	/*
	 * The following field is re-initialized when this cset gets linked
	 * in cgroup_init().  However, let's initialize the field
	 * statically too so that the default cgroup can be accessed safely
	 * early during boot.
	 */
	.dfl_cgrp		= &cgrp_dfl_root.cgrp,
};

static int css_set_count	= 1;	/* 1 for init_css_set */

static bool css_set_threaded(struct css_set *cset)
{
	return cset->dom_cset != cset;
}
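/*
 * Illustrative sketch (not part of the build): the mask-filtered walk,
 * e.g. over the have_fork_callback bitmask defined earlier (this is the
 * shape cgroup_post_fork() uses elsewhere in the file):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, have_fork_callback) {
 *		ss->fork(child);	// only subsystems with ->fork
 *	} while_each_subsys_mask();
 */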
/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 *
 * css_set_populated() should be the same as !!cset->nr_tasks at steady
 * state.  However, css_set_populated() can be called while a task is being
 * added to or removed from the linked list before the nr_tasks is
 * properly updated.  Hence, we can't just look at ->nr_tasks here.
 */
static bool css_set_populated(struct css_set *cset)
{
	lockdep_assert_held(&css_set_lock);

	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update the populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->nr_populated_* accordingly.  The
 * count is propagated towards root so that a given cgroup's
 * nr_populated_children is zero iff none of its descendants contain any
 * tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if both
 * @cgrp->nr_populated_csets and @cgrp->nr_populated_children are zero and
 * 1 otherwise.  When the sum changes from or to zero, userland is notified
 * that the content of the interface file has changed.  This can be used to
 * detect when @cgrp and its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
	struct cgroup *child = NULL;
	int adj = populated ? 1 : -1;

	lockdep_assert_held(&css_set_lock);

	do {
		bool was_populated = cgroup_is_populated(cgrp);

		if (!child) {
			cgrp->nr_populated_csets += adj;
		} else {
			if (cgroup_is_threaded(child))
				cgrp->nr_populated_threaded_children += adj;
			else
				cgrp->nr_populated_domain_children += adj;
		}

		if (was_populated == cgroup_is_populated(cgrp))
			break;

		cgroup1_check_for_release(cgrp);
		TRACE_CGROUP_PATH(notify_populated, cgrp,
				  cgroup_is_populated(cgrp));
		cgroup_file_notify(&cgrp->events_file);

		child = cgrp;
		cgrp = cgroup_parent(cgrp);
	} while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * populated counters of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
	struct cgrp_cset_link *link;

	lockdep_assert_held(&css_set_lock);

	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
		cgroup_update_populated(link->cgrp, populated);
}

/*
 * @task is leaving, advance task iterators which are pointing to it so
 * that they can resume at the next position.  Advancing an iterator might
 * remove it from the list, use safe walk.  See css_task_iter_skip() for
 * details.
 */
static void css_set_skip_task_iters(struct css_set *cset,
				    struct task_struct *task)
{
	struct css_task_iter *it, *pos;

	list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
		css_task_iter_skip(it, task);
}
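/*
 * Illustrative sketch (not part of the build): how the populated counts
 * propagate.  For a hierarchy R <- A <- B, the first task entering one
 * of B's css_sets walks:
 *
 *	B->nr_populated_csets			0 -> 1
 *	A->nr_populated_domain_children		0 -> 1
 *	R->nr_populated_domain_children		0 -> 1
 *
 * Each 0 <-> nonzero transition notifies that cgroup's "cgroup.events"
 * file; as soon as one level's populated state doesn't change, the loop
 * in cgroup_update_populated() stops walking toward the root.
 */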
/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated counter updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
			      struct css_set *from_cset, struct css_set *to_cset,
			      bool use_mg_tasks)
{
	lockdep_assert_held(&css_set_lock);

	if (to_cset && !css_set_populated(to_cset))
		css_set_update_populated(to_cset, true);

	if (from_cset) {
		WARN_ON_ONCE(list_empty(&task->cg_list));

		css_set_skip_task_iters(from_cset, task);
		list_del_init(&task->cg_list);
		if (!css_set_populated(from_cset))
			css_set_update_populated(from_cset, false);
	} else {
		WARN_ON_ONCE(!list_empty(&task->cg_list));
	}

	if (to_cset) {
		/*
		 * We are synchronized through cgroup_threadgroup_rwsem
		 * against PF_EXITING setting such that we can't race
		 * against cgroup_exit()/cgroup_free() dropping the css_set.
		 */
		WARN_ON_ONCE(task->flags & PF_EXITING);

		cgroup_move_task(task, to_cset);
		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
							     &to_cset->tasks);
	}
}

/*
 * hash table for css_sets.  This improves the performance of finding an
 * existing css_set.  This hash doesn't (currently) take into account
 * cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
	unsigned long key = 0UL;
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		key += (unsigned long)css[i];
	key = (key >> 16) ^ key;

	return key;
}

void put_css_set_locked(struct css_set *cset)
{
	struct cgrp_cset_link *link, *tmp_link;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&css_set_lock);

	if (!refcount_dec_and_test(&cset->refcount))
		return;

	WARN_ON_ONCE(!list_empty(&cset->threaded_csets));

	/* This css_set is dead. Unlink it and release cgroup and css refs */
	for_each_subsys(ss, ssid) {
		list_del(&cset->e_cset_node[ssid]);
		css_put(cset->subsys[ssid]);
	}
	hash_del(&cset->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		if (cgroup_parent(link->cgrp))
			cgroup_put(link->cgrp);
		kfree(link);
	}

	if (css_set_threaded(cset)) {
		list_del(&cset->threaded_csets_node);
		put_css_set_locked(cset->dom_cset);
	}

	kfree_rcu(cset, rcu_head);
}
/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
			     struct css_set *old_cset,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct cgroup *new_dfl_cgrp;
	struct list_head *l1, *l2;

	/*
	 * On the default hierarchy, there can be csets which are
	 * associated with the same set of cgroups but different csses.
	 * Let's first ensure that csses match.
	 */
	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
		return false;

	/* @cset's domain should match the default cgroup's */
	if (cgroup_on_dfl(new_cgrp))
		new_dfl_cgrp = new_cgrp;
	else
		new_dfl_cgrp = old_cset->dfl_cgrp;

	if (new_dfl_cgrp->dom_cgrp != cset->dom_cset->dfl_cgrp)
		return false;

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies.  As different cgroups may
	 * share the same effective css, this comparison is always
	 * necessary.
	 */
	l1 = &cset->cgrp_links;
	l2 = &old_cset->cgrp_links;
	while (1) {
		struct cgrp_cset_link *link1, *link2;
		struct cgroup *cgrp1, *cgrp2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cset->cgrp_links) {
			BUG_ON(l2 != &old_cset->cgrp_links);
			break;
		} else {
			BUG_ON(l2 == &old_cset->cgrp_links);
		}
		/* Locate the cgroups associated with these links. */
		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
		cgrp1 = link1->cgrp;
		cgrp2 = link2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cgrp1->root != cgrp2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cgrp1->root == new_cgrp->root) {
			if (cgrp1 != new_cgrp)
				return false;
		} else {
			if (cgrp1 != cgrp2)
				return false;
		}
	}
	return true;
}
/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
					struct cgroup *cgrp,
					struct cgroup_subsys_state *template[])
{
	struct cgroup_root *root = cgrp->root;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	unsigned long key;
	int i;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set.  While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for_each_subsys(ss, i) {
		if (root->subsys_mask & (1UL << i)) {
			/*
			 * @ss is in this hierarchy, so we want the
			 * effective css from @cgrp.
			 */
			template[i] = cgroup_e_css_by_mask(cgrp, ss);
		} else {
			/*
			 * @ss is not in this hierarchy, so we don't want
			 * to change the css.
			 */
			template[i] = old_cset->subsys[i];
		}
	}

	key = css_set_hash(template);
	hash_for_each_possible(css_set_table, cset, hlist, key) {
		if (!compare_css_sets(cset, old_cset, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cset;
	}

	/* No existing css_set matched */
	return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
	struct cgrp_cset_link *link, *tmp_link;

	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
		list_del(&link->cset_link);
		kfree(link);
	}
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
	struct cgrp_cset_link *link;
	int i;

	INIT_LIST_HEAD(tmp_links);

	for (i = 0; i < count; i++) {
		link = kzalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cgrp_cset_links(tmp_links);
			return -ENOMEM;
		}
		list_add(&link->cset_link, tmp_links);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
			 struct cgroup *cgrp)
{
	struct cgrp_cset_link *link;

	BUG_ON(list_empty(tmp_links));

	if (cgroup_on_dfl(cgrp))
		cset->dfl_cgrp = cgrp;

	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
	link->cset = cset;
	link->cgrp = cgrp;

	/*
	 * Always add links to the tail of the lists so that the lists are
	 * in chronological order.
	 */
	list_move_tail(&link->cset_link, &cgrp->cset_links);
	list_add_tail(&link->cgrp_link, &cset->cgrp_links);

	if (cgroup_parent(cgrp))
		cgroup_get_live(cgrp);
}
/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
				    struct cgroup *cgrp)
{
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
	struct css_set *cset;
	struct list_head tmp_links;
	struct cgrp_cset_link *link;
	struct cgroup_subsys *ss;
	unsigned long key;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/* First see if we already have a css_set that matches
	 * the desired set */
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
		get_css_set(cset);
	spin_unlock_irq(&css_set_lock);

	if (cset)
		return cset;

	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		return NULL;

	/* Allocate all the cgrp_cset_link objects that we'll need */
	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
		kfree(cset);
		return NULL;
	}

	refcount_set(&cset->refcount, 1);
	cset->dom_cset = cset;
	INIT_LIST_HEAD(&cset->tasks);
	INIT_LIST_HEAD(&cset->mg_tasks);
	INIT_LIST_HEAD(&cset->dying_tasks);
	INIT_LIST_HEAD(&cset->task_iters);
	INIT_LIST_HEAD(&cset->threaded_csets);
	INIT_HLIST_NODE(&cset->hlist);
	INIT_LIST_HEAD(&cset->cgrp_links);
	INIT_LIST_HEAD(&cset->mg_src_preload_node);
	INIT_LIST_HEAD(&cset->mg_dst_preload_node);
	INIT_LIST_HEAD(&cset->mg_node);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));

	spin_lock_irq(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_links, cset, c);
	}

	BUG_ON(!list_empty(&tmp_links));

	css_set_count++;

	/* Add @cset to the hash table */
	key = css_set_hash(cset->subsys);
	hash_add(css_set_table, &cset->hlist, key);

	for_each_subsys(ss, ssid) {
		struct cgroup_subsys_state *css = cset->subsys[ssid];

		list_add_tail(&cset->e_cset_node[ssid],
			      &css->cgroup->e_csets[ssid]);
		css_get(css);
	}

	spin_unlock_irq(&css_set_lock);

	/*
	 * If @cset should be threaded, look up the matching dom_cset and
	 * link them up.  We first fully initialize @cset then look for the
	 * dom_cset.  It's simpler this way and safe as @cset is guaranteed
	 * to stay empty until we return.
	 */
	if (cgroup_is_threaded(cset->dfl_cgrp)) {
		struct css_set *dcset;

		dcset = find_css_set(cset, cset->dfl_cgrp->dom_cgrp);
		if (!dcset) {
			put_css_set(cset);
			return NULL;
		}

		spin_lock_irq(&css_set_lock);
		cset->dom_cset = dcset;
		list_add_tail(&cset->threaded_csets_node,
			      &dcset->threaded_csets);
		spin_unlock_irq(&css_set_lock);
	}

	return cset;
}

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
	struct cgroup *root_cgrp = kernfs_root_to_node(kf_root)->priv;

	return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
	int id;

	lockdep_assert_held(&cgroup_mutex);

	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	root->hierarchy_id = id;
	return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
	lockdep_assert_held(&cgroup_mutex);

	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}

void cgroup_free_root(struct cgroup_root *root)
{
	kfree(root);
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
	struct cgroup *cgrp = &root->cgrp;
	struct cgrp_cset_link *link, *tmp_link;

	trace_cgroup_destroy_root(root);

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	BUG_ON(atomic_read(&root->nr_cgrps));
	BUG_ON(!list_empty(&cgrp->self.children));

	/* Rebind all subsystems back to the default hierarchy */
	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));

	/*
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 */
	spin_lock_irq(&css_set_lock);

	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cgrp_link);
		kfree(link);
	}

	spin_unlock_irq(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		cgroup_root_count--;
	}

	cgroup_exit_root_id(root);

	mutex_unlock(&cgroup_mutex);

	cgroup_rstat_exit(cgrp);
	kernfs_destroy_root(root->kf_root);
	cgroup_free_root(root);
}

/*
 * look up cgroup associated with current task's cgroup namespace on the
 * specified hierarchy
 */
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
	struct cgroup *res = NULL;
	struct css_set *cset;

	lockdep_assert_held(&css_set_lock);

	rcu_read_lock();

	cset = current->nsproxy->cgroup_ns->root_cset;
	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else if (root == &cgrp_dfl_root) {
		res = cset->dfl_cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	rcu_read_unlock();

	BUG_ON(!res);
	return res;
}
/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
					    struct cgroup_root *root)
{
	struct cgroup *res = NULL;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);

	if (cset == &init_css_set) {
		res = &root->cgrp;
	} else if (root == &cgrp_dfl_root) {
		res = cset->dfl_cgrp;
	} else {
		struct cgrp_cset_link *link;

		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
			struct cgroup *c = link->cgrp;

			if (c->root == root) {
				res = c;
				break;
			}
		}
	}

	BUG_ON(!res);
	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy.  Must be
 * called with cgroup_mutex and css_set_lock held.
 */
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root)
{
	/*
	 * No need to lock the task - since we hold css_set_lock the
	 * task can't change groups.
	 */
	return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, there is no way a task
 * attached to that cgroup can fork (the other way to increment the
 * count).  So code holding cgroup_mutex can safely assume that if the
 * count is zero, it will stay zero.  Similarly, if a task holds
 * cgroup_mutex on a cgroup with zero count, it knows that the cgroup
 * won't be removed, as cgroup_rmdir() needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), the root cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that the root cgroup cannot be deleted.
 *
 * P.S. One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	struct cgroup_subsys *ss = cft->ss;

	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
		const char *dbg = (cft->flags & CFTYPE_DEBUG) ? ".__DEBUG__." : "";

		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
			 dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
			 cft->name);
	} else {
		strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	}
	return buf;
}
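/*
 * Illustrative sketch (not part of the build): names produced by
 * cgroup_file_name() above, taking the memory controller's "current"
 * file as an example:
 *
 *	cft->ss set, default prefixing	-> "memory.current"
 *	cft->flags & CFTYPE_DEBUG	-> ".__DEBUG__.memory.current"
 *	cft->ss == NULL (core file)	-> "cgroup.procs", etc.
 *
 * CGROUP_FILE_NAME_MAX at the top of the file is sized for exactly
 * this: subsystem name + '.' + cftype name + '\0'.
 */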
/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write) {
		if (cft->flags & CFTYPE_WORLD_WRITABLE)
			mode |= S_IWUGO;
		else
			mode |= S_IWUSR;
	}

	return mode;
}

/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @subtree_control: the new subtree_control mask to consider
 * @this_ss_mask: available subsystems
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied while restricted to @this_ss_mask.
 */
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
	u16 cur_ss_mask = subtree_control;
	struct cgroup_subsys *ss;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;

	while (true) {
		u16 new_ss_mask = cur_ss_mask;

		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
			new_ss_mask |= ss->depends_on;
		} while_each_subsys_mask();

		/*
		 * Mask out subsystems which aren't available.  This can
		 * happen only if some depended-upon subsystems were bound
		 * to non-default hierarchies.
		 */
		new_ss_mask &= this_ss_mask;

		if (new_ss_mask == cur_ss_mask)
			break;
		cur_ss_mask = new_ss_mask;
	}

	return cur_ss_mask;
}
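/*
 * Illustrative sketch (not part of the build): the fixpoint iteration
 * in cgroup_calc_subtree_ss_mask() with two hypothetical controllers,
 * A at bit 0 whose ->depends_on is BIT(1), and B at bit 1:
 *
 *	subtree_control			= 0x1	(only A requested)
 *	pass 1: new_ss_mask = 0x1 | BIT(1)	= 0x3
 *	pass 2: nothing new			-> loop terminates, 0x3
 *
 * If B were bound to a legacy hierarchy, @this_ss_mask would mask bit 1
 * back out and the result would collapse to just A again.
 */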
/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
void cgroup_kn_unlock(struct kernfs_node *kn)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 * @drain_offline: perform offline draining on the cgroup
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
 * cgroup is drained of offlining csses before return.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
{
	struct cgroup *cgrp;

	if (kernfs_type(kn) == KERNFS_DIR)
		cgrp = kn->priv;
	else
		cgrp = kn->parent->priv;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  cgroup liveliness check alone provides enough
	 * protection against removal.  Ensure @cgrp stays accessible and
	 * break the active_ref protection.
	 */
	if (!cgroup_tryget(cgrp))
		return NULL;
	kernfs_break_active_protection(kn);

	if (drain_offline)
		cgroup_lock_and_drain_offline(cgrp);
	else
		mutex_lock(&cgroup_mutex);

	if (!cgroup_is_dead(cgrp))
		return cgrp;

	cgroup_kn_unlock(kn);
	return NULL;
}
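/*
 * Illustrative sketch (not part of the build): the canonical shape of
 * a cgroup kernfs write handler built on the pair above, modeled on
 * handlers such as cgroup_subtree_control_write().  "foo_write" is
 * hypothetical:
 *
 *	static ssize_t foo_write(struct kernfs_open_file *of, char *buf,
 *				 size_t nbytes, loff_t off)
 *	{
 *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn, false);
 *
 *		if (!cgrp)
 *			return -ENODEV;
 *		...			// cgrp alive, cgroup_mutex held
 *		cgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */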
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_mutex);

	if (cft->file_offset) {
		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
		struct cgroup_file *cfile = (void *)css + cft->file_offset;

		spin_lock_irq(&cgroup_file_kn_lock);
		cfile->kn = NULL;
		spin_unlock_irq(&cgroup_file_kn_lock);

		del_timer_sync(&cfile->notify_timer);
	}

	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 */
static void css_clear_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts;

	if (!(css->flags & CSS_VISIBLE))
		return;

	css->flags &= ~CSS_VISIBLE;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		cgroup_addrm_files(css, cgrp, cfts, false);
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node)
			cgroup_addrm_files(css, cgrp, cfts, false);
	}
}

/**
 * css_populate_dir - create subsys files in a cgroup directory
 * @css: target css
 *
 * On failure, no file is added.
 */
static int css_populate_dir(struct cgroup_subsys_state *css)
{
	struct cgroup *cgrp = css->cgroup;
	struct cftype *cfts, *failed_cfts;
	int ret;

	if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
		return 0;

	if (!css->ss) {
		if (cgroup_on_dfl(cgrp))
			cfts = cgroup_base_files;
		else
			cfts = cgroup1_base_files;

		ret = cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
		if (ret < 0)
			return ret;
	} else {
		list_for_each_entry(cfts, &css->ss->cfts, node) {
			ret = cgroup_addrm_files(css, cgrp, cfts, true);
			if (ret < 0) {
				failed_cfts = cfts;
				goto err;
			}
		}
	}

	css->flags |= CSS_VISIBLE;

	return 0;
err:
	list_for_each_entry(cfts, &css->ss->cfts, node) {
		if (cfts == failed_cfts)
			break;
		cgroup_addrm_files(css, cgrp, cfts, false);
	}
	return ret;
}

int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
{
	struct cgroup *dcgrp = &dst_root->cgrp;
	struct cgroup_subsys *ss;
	int ssid, i, ret;
	u16 dfl_disable_ss_mask = 0;

	lockdep_assert_held(&cgroup_mutex);

	do_each_subsys_mask(ss, ssid, ss_mask) {
		/*
		 * If @ss has non-root csses attached to it, can't move.
		 * If @ss is an implicit controller, it is exempt from this
		 * rule and can be stolen.
		 */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
		    !ss->implicit_on_dfl)
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;

		/*
		 * Collect ssid's that need to be disabled from default
		 * hierarchy.
		 */
		if (ss->root == &cgrp_dfl_root)
			dfl_disable_ss_mask |= 1 << ssid;
	} while_each_subsys_mask();

	if (dfl_disable_ss_mask) {
		struct cgroup *scgrp = &cgrp_dfl_root.cgrp;

		/*
		 * Controllers from default hierarchy that need to be rebound
		 * are all disabled together in one go.
		 */
		cgrp_dfl_root.subsys_mask &= ~dfl_disable_ss_mask;
		WARN_ON(cgroup_apply_control(scgrp));
		cgroup_finalize_control(scgrp, 0);
	}

	do_each_subsys_mask(ss, ssid, ss_mask) {
		struct cgroup_root *src_root = ss->root;
		struct cgroup *scgrp = &src_root->cgrp;
		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
		struct css_set *cset;

		WARN_ON(!css || cgroup_css(dcgrp, ss));

		if (src_root != &cgrp_dfl_root) {
			/* disable from the source */
			src_root->subsys_mask &= ~(1 << ssid);
			WARN_ON(cgroup_apply_control(scgrp));
			cgroup_finalize_control(scgrp, 0);
		}

		/* rebind */
		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
		rcu_assign_pointer(dcgrp->subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = dcgrp;

		spin_lock_irq(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_irq(&css_set_lock);

		if (ss->css_rstat_flush) {
			list_del_rcu(&css->rstat_css_node);
			list_add_rcu(&css->rstat_css_node,
				     &dcgrp->rstat_css_list);
		}

		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		if (dst_root == &cgrp_dfl_root) {
			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
		} else {
			dcgrp->subtree_control |= 1 << ssid;
			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
		}

		ret = cgroup_apply_control(dcgrp);
		if (ret)
			pr_warn("partial failure to rebind %s controller (err=%d)\n",
				ss->name, ret);

		if (ss->bind)
			ss->bind(css);
	} while_each_subsys_mask();

	kernfs_activate(dcgrp->kn);
	return 0;
}

int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root)
{
	int len = 0;
	char *buf = NULL;
	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
	struct cgroup *ns_cgroup;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_irq(&css_set_lock);

	if (len >= PATH_MAX)
		len = -ERANGE;
	else if (len > 0) {
		seq_escape(sf, buf, " \t\n\\");
		len = 0;
	}
	kfree(buf);
	return len;
}

enum cgroup2_param {
	Opt_nsdelegate,
	Opt_memory_localevents,
	Opt_memory_recursiveprot,
	nr__cgroup2_params
};

static const struct fs_parameter_spec cgroup2_fs_parameters[] = {
	fsparam_flag("nsdelegate",		Opt_nsdelegate),
	fsparam_flag("memory_localevents",	Opt_memory_localevents),
	fsparam_flag("memory_recursiveprot",	Opt_memory_recursiveprot),
	{}
};
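/*
 * Illustrative sketch (not part of the build): the options declared
 * above correspond to v2 mount/remount invocations such as:
 *
 *	mount -t cgroup2 -o nsdelegate,memory_recursiveprot none /mnt
 *	mount -o remount,nsdelegate none /mnt
 *
 * apply_cgroup_root_flags() below only honors them when called from
 * the init cgroup namespace.
 */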
fsparam_flag("nsdelegate", Opt_nsdelegate), 1876 fsparam_flag("memory_localevents", Opt_memory_localevents), 1877 fsparam_flag("memory_recursiveprot", Opt_memory_recursiveprot), 1878 {} 1879 }; 1880 1881 static int cgroup2_parse_param(struct fs_context *fc, struct fs_parameter *param) 1882 { 1883 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 1884 struct fs_parse_result result; 1885 int opt; 1886 1887 opt = fs_parse(fc, cgroup2_fs_parameters, param, &result); 1888 if (opt < 0) 1889 return opt; 1890 1891 switch (opt) { 1892 case Opt_nsdelegate: 1893 ctx->flags |= CGRP_ROOT_NS_DELEGATE; 1894 return 0; 1895 case Opt_memory_localevents: 1896 ctx->flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS; 1897 return 0; 1898 case Opt_memory_recursiveprot: 1899 ctx->flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; 1900 return 0; 1901 } 1902 return -EINVAL; 1903 } 1904 1905 static void apply_cgroup_root_flags(unsigned int root_flags) 1906 { 1907 if (current->nsproxy->cgroup_ns == &init_cgroup_ns) { 1908 if (root_flags & CGRP_ROOT_NS_DELEGATE) 1909 cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE; 1910 else 1911 cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE; 1912 1913 if (root_flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) 1914 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_LOCAL_EVENTS; 1915 else 1916 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_LOCAL_EVENTS; 1917 1918 if (root_flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT) 1919 cgrp_dfl_root.flags |= CGRP_ROOT_MEMORY_RECURSIVE_PROT; 1920 else 1921 cgrp_dfl_root.flags &= ~CGRP_ROOT_MEMORY_RECURSIVE_PROT; 1922 } 1923 } 1924 1925 static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root) 1926 { 1927 if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) 1928 seq_puts(seq, ",nsdelegate"); 1929 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) 1930 seq_puts(seq, ",memory_localevents"); 1931 if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT) 1932 seq_puts(seq, ",memory_recursiveprot"); 1933 return 0; 1934 } 1935 1936 static int cgroup_reconfigure(struct fs_context *fc) 1937 { 1938 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 1939 1940 apply_cgroup_root_flags(ctx->flags); 1941 return 0; 1942 } 1943 1944 static void init_cgroup_housekeeping(struct cgroup *cgrp) 1945 { 1946 struct cgroup_subsys *ss; 1947 int ssid; 1948 1949 INIT_LIST_HEAD(&cgrp->self.sibling); 1950 INIT_LIST_HEAD(&cgrp->self.children); 1951 INIT_LIST_HEAD(&cgrp->cset_links); 1952 INIT_LIST_HEAD(&cgrp->pidlists); 1953 mutex_init(&cgrp->pidlist_mutex); 1954 cgrp->self.cgroup = cgrp; 1955 cgrp->self.flags |= CSS_ONLINE; 1956 cgrp->dom_cgrp = cgrp; 1957 cgrp->max_descendants = INT_MAX; 1958 cgrp->max_depth = INT_MAX; 1959 INIT_LIST_HEAD(&cgrp->rstat_css_list); 1960 prev_cputime_init(&cgrp->prev_cputime); 1961 1962 for_each_subsys(ss, ssid) 1963 INIT_LIST_HEAD(&cgrp->e_csets[ssid]); 1964 1965 init_waitqueue_head(&cgrp->offline_waitq); 1966 INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent); 1967 } 1968 1969 void init_cgroup_root(struct cgroup_fs_context *ctx) 1970 { 1971 struct cgroup_root *root = ctx->root; 1972 struct cgroup *cgrp = &root->cgrp; 1973 1974 INIT_LIST_HEAD(&root->root_list); 1975 atomic_set(&root->nr_cgrps, 1); 1976 cgrp->root = root; 1977 init_cgroup_housekeeping(cgrp); 1978 1979 root->flags = ctx->flags; 1980 if (ctx->release_agent) 1981 strscpy(root->release_agent_path, ctx->release_agent, PATH_MAX); 1982 if (ctx->name) 1983 strscpy(root->name, ctx->name, MAX_CGROUP_ROOT_NAMELEN); 1984 if (ctx->cpuset_clone_children) 1985 set_bit(CGRP_CPUSET_CLONE_CHILDREN, 
&root->cgrp.flags); 1986 } 1987 1988 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) 1989 { 1990 LIST_HEAD(tmp_links); 1991 struct cgroup *root_cgrp = &root->cgrp; 1992 struct kernfs_syscall_ops *kf_sops; 1993 struct css_set *cset; 1994 int i, ret; 1995 1996 lockdep_assert_held(&cgroup_mutex); 1997 1998 ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 1999 0, GFP_KERNEL); 2000 if (ret) 2001 goto out; 2002 2003 /* 2004 * We're accessing css_set_count without locking css_set_lock here, 2005 * but that's OK - it can only be increased by someone holding 2006 * cgroup_lock, and that's us. Later rebinding may disable 2007 * controllers on the default hierarchy and thus create new csets, 2008 * which can't be more than the existing ones. Allocate 2x. 2009 */ 2010 ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links); 2011 if (ret) 2012 goto cancel_ref; 2013 2014 ret = cgroup_init_root_id(root); 2015 if (ret) 2016 goto cancel_ref; 2017 2018 kf_sops = root == &cgrp_dfl_root ? 2019 &cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops; 2020 2021 root->kf_root = kernfs_create_root(kf_sops, 2022 KERNFS_ROOT_CREATE_DEACTIVATED | 2023 KERNFS_ROOT_SUPPORT_EXPORTOP | 2024 KERNFS_ROOT_SUPPORT_USER_XATTR, 2025 root_cgrp); 2026 if (IS_ERR(root->kf_root)) { 2027 ret = PTR_ERR(root->kf_root); 2028 goto exit_root_id; 2029 } 2030 root_cgrp->kn = kernfs_root_to_node(root->kf_root); 2031 WARN_ON_ONCE(cgroup_ino(root_cgrp) != 1); 2032 root_cgrp->ancestor_ids[0] = cgroup_id(root_cgrp); 2033 2034 ret = css_populate_dir(&root_cgrp->self); 2035 if (ret) 2036 goto destroy_root; 2037 2038 ret = cgroup_rstat_init(root_cgrp); 2039 if (ret) 2040 goto destroy_root; 2041 2042 ret = rebind_subsystems(root, ss_mask); 2043 if (ret) 2044 goto exit_stats; 2045 2046 ret = cgroup_bpf_inherit(root_cgrp); 2047 WARN_ON_ONCE(ret); 2048 2049 trace_cgroup_setup_root(root); 2050 2051 /* 2052 * There must be no failure case after here, since rebinding takes 2053 * care of subsystems' refcounts, which are explicitly dropped in 2054 * the failure exit path. 2055 */ 2056 list_add(&root->root_list, &cgroup_roots); 2057 cgroup_root_count++; 2058 2059 /* 2060 * Link the root cgroup in this hierarchy into all the css_set 2061 * objects. 2062 */ 2063 spin_lock_irq(&css_set_lock); 2064 hash_for_each(css_set_table, i, cset, hlist) { 2065 link_css_set(&tmp_links, cset, root_cgrp); 2066 if (css_set_populated(cset)) 2067 cgroup_update_populated(root_cgrp, true); 2068 } 2069 spin_unlock_irq(&css_set_lock); 2070 2071 BUG_ON(!list_empty(&root_cgrp->self.children)); 2072 BUG_ON(atomic_read(&root->nr_cgrps) != 1); 2073 2074 ret = 0; 2075 goto out; 2076 2077 exit_stats: 2078 cgroup_rstat_exit(root_cgrp); 2079 destroy_root: 2080 kernfs_destroy_root(root->kf_root); 2081 root->kf_root = NULL; 2082 exit_root_id: 2083 cgroup_exit_root_id(root); 2084 cancel_ref: 2085 percpu_ref_exit(&root_cgrp->self.refcnt); 2086 out: 2087 free_cgrp_cset_links(&tmp_links); 2088 return ret; 2089 } 2090 2091 int cgroup_do_get_tree(struct fs_context *fc) 2092 { 2093 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2094 int ret; 2095 2096 ctx->kfc.root = ctx->root->kf_root; 2097 if (fc->fs_type == &cgroup2_fs_type) 2098 ctx->kfc.magic = CGROUP2_SUPER_MAGIC; 2099 else 2100 ctx->kfc.magic = CGROUP_SUPER_MAGIC; 2101 ret = kernfs_get_tree(fc); 2102 2103 /* 2104 * In non-init cgroup namespace, instead of root cgroup's dentry, 2105 * we return the dentry corresponding to the cgroupns->root_cgrp. 
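 *
 * For example (illustrative only): if the mounting task's cgroup
 * namespace was created while the task lived in /batch/job1, fc->root
 * is switched to the dentry for /batch/job1, so the task sees its own
 * subtree as "/".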
2106 */ 2107 if (!ret && ctx->ns != &init_cgroup_ns) { 2108 struct dentry *nsdentry; 2109 struct super_block *sb = fc->root->d_sb; 2110 struct cgroup *cgrp; 2111 2112 mutex_lock(&cgroup_mutex); 2113 spin_lock_irq(&css_set_lock); 2114 2115 cgrp = cset_cgroup_from_root(ctx->ns->root_cset, ctx->root); 2116 2117 spin_unlock_irq(&css_set_lock); 2118 mutex_unlock(&cgroup_mutex); 2119 2120 nsdentry = kernfs_node_dentry(cgrp->kn, sb); 2121 dput(fc->root); 2122 if (IS_ERR(nsdentry)) { 2123 deactivate_locked_super(sb); 2124 ret = PTR_ERR(nsdentry); 2125 nsdentry = NULL; 2126 } 2127 fc->root = nsdentry; 2128 } 2129 2130 if (!ctx->kfc.new_sb_created) 2131 cgroup_put(&ctx->root->cgrp); 2132 2133 return ret; 2134 } 2135 2136 /* 2137 * Destroy a cgroup filesystem context. 2138 */ 2139 static void cgroup_fs_context_free(struct fs_context *fc) 2140 { 2141 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2142 2143 kfree(ctx->name); 2144 kfree(ctx->release_agent); 2145 put_cgroup_ns(ctx->ns); 2146 kernfs_free_fs_context(fc); 2147 kfree(ctx); 2148 } 2149 2150 static int cgroup_get_tree(struct fs_context *fc) 2151 { 2152 struct cgroup_fs_context *ctx = cgroup_fc2context(fc); 2153 int ret; 2154 2155 cgrp_dfl_visible = true; 2156 cgroup_get_live(&cgrp_dfl_root.cgrp); 2157 ctx->root = &cgrp_dfl_root; 2158 2159 ret = cgroup_do_get_tree(fc); 2160 if (!ret) 2161 apply_cgroup_root_flags(ctx->flags); 2162 return ret; 2163 } 2164 2165 static const struct fs_context_operations cgroup_fs_context_ops = { 2166 .free = cgroup_fs_context_free, 2167 .parse_param = cgroup2_parse_param, 2168 .get_tree = cgroup_get_tree, 2169 .reconfigure = cgroup_reconfigure, 2170 }; 2171 2172 static const struct fs_context_operations cgroup1_fs_context_ops = { 2173 .free = cgroup_fs_context_free, 2174 .parse_param = cgroup1_parse_param, 2175 .get_tree = cgroup1_get_tree, 2176 .reconfigure = cgroup1_reconfigure, 2177 }; 2178 2179 /* 2180 * Initialise the cgroup filesystem creation/reconfiguration context. Notably, 2181 * we select the namespace we're going to use. 2182 */ 2183 static int cgroup_init_fs_context(struct fs_context *fc) 2184 { 2185 struct cgroup_fs_context *ctx; 2186 2187 ctx = kzalloc(sizeof(struct cgroup_fs_context), GFP_KERNEL); 2188 if (!ctx) 2189 return -ENOMEM; 2190 2191 ctx->ns = current->nsproxy->cgroup_ns; 2192 get_cgroup_ns(ctx->ns); 2193 fc->fs_private = &ctx->kfc; 2194 if (fc->fs_type == &cgroup2_fs_type) 2195 fc->ops = &cgroup_fs_context_ops; 2196 else 2197 fc->ops = &cgroup1_fs_context_ops; 2198 put_user_ns(fc->user_ns); 2199 fc->user_ns = get_user_ns(ctx->ns->user_ns); 2200 fc->global = true; 2201 return 0; 2202 } 2203 2204 static void cgroup_kill_sb(struct super_block *sb) 2205 { 2206 struct kernfs_root *kf_root = kernfs_root_from_sb(sb); 2207 struct cgroup_root *root = cgroup_root_from_kf(kf_root); 2208 2209 /* 2210 * If @root doesn't have any children, start killing it. 2211 * This prevents new mounts by disabling percpu_ref_tryget_live(). 2212 * 2213 * And don't kill the default root. 
2214 */ 2215 if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && 2216 !percpu_ref_is_dying(&root->cgrp.self.refcnt)) { 2217 cgroup_bpf_offline(&root->cgrp); 2218 percpu_ref_kill(&root->cgrp.self.refcnt); 2219 } 2220 cgroup_put(&root->cgrp); 2221 kernfs_kill_sb(sb); 2222 } 2223 2224 struct file_system_type cgroup_fs_type = { 2225 .name = "cgroup", 2226 .init_fs_context = cgroup_init_fs_context, 2227 .parameters = cgroup1_fs_parameters, 2228 .kill_sb = cgroup_kill_sb, 2229 .fs_flags = FS_USERNS_MOUNT, 2230 }; 2231 2232 static struct file_system_type cgroup2_fs_type = { 2233 .name = "cgroup2", 2234 .init_fs_context = cgroup_init_fs_context, 2235 .parameters = cgroup2_fs_parameters, 2236 .kill_sb = cgroup_kill_sb, 2237 .fs_flags = FS_USERNS_MOUNT, 2238 }; 2239 2240 #ifdef CONFIG_CPUSETS 2241 static const struct fs_context_operations cpuset_fs_context_ops = { 2242 .get_tree = cgroup1_get_tree, 2243 .free = cgroup_fs_context_free, 2244 }; 2245 2246 /* 2247 * This is ugly, but preserves the userspace API for existing cpuset 2248 * users. If someone tries to mount the "cpuset" filesystem, we 2249 * silently switch it to mount "cgroup" instead 2250 */ 2251 static int cpuset_init_fs_context(struct fs_context *fc) 2252 { 2253 char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER); 2254 struct cgroup_fs_context *ctx; 2255 int err; 2256 2257 err = cgroup_init_fs_context(fc); 2258 if (err) { 2259 kfree(agent); 2260 return err; 2261 } 2262 2263 fc->ops = &cpuset_fs_context_ops; 2264 2265 ctx = cgroup_fc2context(fc); 2266 ctx->subsys_mask = 1 << cpuset_cgrp_id; 2267 ctx->flags |= CGRP_ROOT_NOPREFIX; 2268 ctx->release_agent = agent; 2269 2270 get_filesystem(&cgroup_fs_type); 2271 put_filesystem(fc->fs_type); 2272 fc->fs_type = &cgroup_fs_type; 2273 2274 return 0; 2275 } 2276 2277 static struct file_system_type cpuset_fs_type = { 2278 .name = "cpuset", 2279 .init_fs_context = cpuset_init_fs_context, 2280 .fs_flags = FS_USERNS_MOUNT, 2281 }; 2282 #endif 2283 2284 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, 2285 struct cgroup_namespace *ns) 2286 { 2287 struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root); 2288 2289 return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen); 2290 } 2291 2292 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen, 2293 struct cgroup_namespace *ns) 2294 { 2295 int ret; 2296 2297 mutex_lock(&cgroup_mutex); 2298 spin_lock_irq(&css_set_lock); 2299 2300 ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns); 2301 2302 spin_unlock_irq(&css_set_lock); 2303 mutex_unlock(&cgroup_mutex); 2304 2305 return ret; 2306 } 2307 EXPORT_SYMBOL_GPL(cgroup_path_ns); 2308 2309 /** 2310 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy 2311 * @task: target task 2312 * @buf: the buffer to write the path into 2313 * @buflen: the length of the buffer 2314 * 2315 * Determine @task's cgroup on the first (the one with the lowest non-zero 2316 * hierarchy_id) cgroup hierarchy and copy its path into @buf. This 2317 * function grabs cgroup_mutex and shouldn't be used inside locks used by 2318 * cgroup controller callbacks. 2319 * 2320 * Return value is the same as kernfs_path(). 
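 *
 * A minimal usage sketch (illustrative only; the buffer is ordinary
 * caller-owned memory):
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (buf && task_cgroup_path(current, buf, PATH_MAX) >= 0)
 *		pr_debug("cgroup: %s\n", buf);
 *	kfree(buf);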
2321 */ 2322 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) 2323 { 2324 struct cgroup_root *root; 2325 struct cgroup *cgrp; 2326 int hierarchy_id = 1; 2327 int ret; 2328 2329 mutex_lock(&cgroup_mutex); 2330 spin_lock_irq(&css_set_lock); 2331 2332 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); 2333 2334 if (root) { 2335 cgrp = task_cgroup_from_root(task, root); 2336 ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns); 2337 } else { 2338 /* if no hierarchy exists, everyone is in "/" */ 2339 ret = strlcpy(buf, "/", buflen); 2340 } 2341 2342 spin_unlock_irq(&css_set_lock); 2343 mutex_unlock(&cgroup_mutex); 2344 return ret; 2345 } 2346 EXPORT_SYMBOL_GPL(task_cgroup_path); 2347 2348 /** 2349 * cgroup_migrate_add_task - add a migration target task to a migration context 2350 * @task: target task 2351 * @mgctx: target migration context 2352 * 2353 * Add @task, which is a migration target, to @mgctx->tset. This function 2354 * becomes a noop if @task doesn't need to be migrated. @task's css_set 2355 * should have been added as a migration source and @task->cg_list will be 2356 * moved from the css_set's tasks list to the mg_tasks one. 2357 */ 2358 static void cgroup_migrate_add_task(struct task_struct *task, 2359 struct cgroup_mgctx *mgctx) 2360 { 2361 struct css_set *cset; 2362 2363 lockdep_assert_held(&css_set_lock); 2364 2365 /* @task either already exited or can't exit until the end */ 2366 if (task->flags & PF_EXITING) 2367 return; 2368 2369 /* cgroup_threadgroup_rwsem protects racing against forks */ 2370 WARN_ON_ONCE(list_empty(&task->cg_list)); 2371 2372 cset = task_css_set(task); 2373 if (!cset->mg_src_cgrp) 2374 return; 2375 2376 mgctx->tset.nr_tasks++; 2377 2378 list_move_tail(&task->cg_list, &cset->mg_tasks); 2379 if (list_empty(&cset->mg_node)) 2380 list_add_tail(&cset->mg_node, 2381 &mgctx->tset.src_csets); 2382 if (list_empty(&cset->mg_dst_cset->mg_node)) 2383 list_add_tail(&cset->mg_dst_cset->mg_node, 2384 &mgctx->tset.dst_csets); 2385 } 2386 2387 /** 2388 * cgroup_taskset_first - reset taskset and return the first task 2389 * @tset: taskset of interest 2390 * @dst_cssp: output variable for the destination css 2391 * 2392 * @tset iteration is initialized and the first task is returned. 2393 */ 2394 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, 2395 struct cgroup_subsys_state **dst_cssp) 2396 { 2397 tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); 2398 tset->cur_task = NULL; 2399 2400 return cgroup_taskset_next(tset, dst_cssp); 2401 } 2402 2403 /** 2404 * cgroup_taskset_next - iterate to the next task in taskset 2405 * @tset: taskset of interest 2406 * @dst_cssp: output variable for the destination css 2407 * 2408 * Return the next task in @tset. Iteration must have been initialized 2409 * with cgroup_taskset_first(). 2410 */ 2411 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, 2412 struct cgroup_subsys_state **dst_cssp) 2413 { 2414 struct css_set *cset = tset->cur_cset; 2415 struct task_struct *task = tset->cur_task; 2416 2417 while (CGROUP_HAS_SUBSYS_CONFIG && &cset->mg_node != tset->csets) { 2418 if (!task) 2419 task = list_first_entry(&cset->mg_tasks, 2420 struct task_struct, cg_list); 2421 else 2422 task = list_next_entry(task, cg_list); 2423 2424 if (&task->cg_list != &cset->mg_tasks) { 2425 tset->cur_cset = cset; 2426 tset->cur_task = task; 2427 2428 /* 2429 * This function may be called both before and 2430 * after cgroup_taskset_migrate().
The two cases 2431 * can be distinguished by looking at whether @cset 2432 * has its ->mg_dst_cset set. 2433 */ 2434 if (cset->mg_dst_cset) 2435 *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid]; 2436 else 2437 *dst_cssp = cset->subsys[tset->ssid]; 2438 2439 return task; 2440 } 2441 2442 cset = list_next_entry(cset, mg_node); 2443 task = NULL; 2444 } 2445 2446 return NULL; 2447 } 2448 2449 /** 2450 * cgroup_migrate_execute - migrate a taskset 2451 * @mgctx: migration context 2452 * 2453 * Migrate tasks in @mgctx as set up by the migration preparation functions. 2454 * This function fails iff one of the ->can_attach callbacks fails and 2455 * guarantees that either all or none of the tasks in @mgctx are migrated. 2456 * @mgctx is consumed regardless of success. 2457 */ 2458 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) 2459 { 2460 struct cgroup_taskset *tset = &mgctx->tset; 2461 struct cgroup_subsys *ss; 2462 struct task_struct *task, *tmp_task; 2463 struct css_set *cset, *tmp_cset; 2464 int ssid, failed_ssid, ret; 2465 2466 /* check that we can legitimately attach to the cgroup */ 2467 if (tset->nr_tasks) { 2468 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2469 if (ss->can_attach) { 2470 tset->ssid = ssid; 2471 ret = ss->can_attach(tset); 2472 if (ret) { 2473 failed_ssid = ssid; 2474 goto out_cancel_attach; 2475 } 2476 } 2477 } while_each_subsys_mask(); 2478 } 2479 2480 /* 2481 * Now that we're guaranteed success, proceed to move all tasks to 2482 * the new cgroup. There are no failure cases after here, so this 2483 * is the commit point. 2484 */ 2485 spin_lock_irq(&css_set_lock); 2486 list_for_each_entry(cset, &tset->src_csets, mg_node) { 2487 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) { 2488 struct css_set *from_cset = task_css_set(task); 2489 struct css_set *to_cset = cset->mg_dst_cset; 2490 2491 get_css_set(to_cset); 2492 to_cset->nr_tasks++; 2493 css_set_move_task(task, from_cset, to_cset, true); 2494 from_cset->nr_tasks--; 2495 /* 2496 * If the source or destination cgroup is frozen, 2497 * the task may need to change its state. 2498 */ 2499 cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp, 2500 to_cset->dfl_cgrp); 2501 put_css_set_locked(from_cset); 2502 2503 } 2504 } 2505 spin_unlock_irq(&css_set_lock); 2506 2507 /* 2508 * Migration is committed, all target tasks are now on dst_csets. 2509 * Nothing is sensitive to fork() after this point. Notify 2510 * controllers that migration is complete.
2511 */ 2512 tset->csets = &tset->dst_csets; 2513 2514 if (tset->nr_tasks) { 2515 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2516 if (ss->attach) { 2517 tset->ssid = ssid; 2518 ss->attach(tset); 2519 } 2520 } while_each_subsys_mask(); 2521 } 2522 2523 ret = 0; 2524 goto out_release_tset; 2525 2526 out_cancel_attach: 2527 if (tset->nr_tasks) { 2528 do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { 2529 if (ssid == failed_ssid) 2530 break; 2531 if (ss->cancel_attach) { 2532 tset->ssid = ssid; 2533 ss->cancel_attach(tset); 2534 } 2535 } while_each_subsys_mask(); 2536 } 2537 out_release_tset: 2538 spin_lock_irq(&css_set_lock); 2539 list_splice_init(&tset->dst_csets, &tset->src_csets); 2540 list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) { 2541 list_splice_tail_init(&cset->mg_tasks, &cset->tasks); 2542 list_del_init(&cset->mg_node); 2543 } 2544 spin_unlock_irq(&css_set_lock); 2545 2546 /* 2547 * Re-initialize the cgroup_taskset structure in case it is reused 2548 * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() 2549 * iteration. 2550 */ 2551 tset->nr_tasks = 0; 2552 tset->csets = &tset->src_csets; 2553 return ret; 2554 } 2555 2556 /** 2557 * cgroup_migrate_vet_dst - verify whether a cgroup can be migration destination 2558 * @dst_cgrp: destination cgroup to test 2559 * 2560 * On the default hierarchy, except for the mixable, (possible) thread root 2561 * and threaded cgroups, subtree_control must be zero for migration 2562 * destination cgroups with tasks so that child cgroups don't compete 2563 * against tasks. 2564 */ 2565 int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp) 2566 { 2567 /* v1 doesn't have any restriction */ 2568 if (!cgroup_on_dfl(dst_cgrp)) 2569 return 0; 2570 2571 /* verify @dst_cgrp can host resources */ 2572 if (!cgroup_is_valid_domain(dst_cgrp->dom_cgrp)) 2573 return -EOPNOTSUPP; 2574 2575 /* mixables don't care */ 2576 if (cgroup_is_mixable(dst_cgrp)) 2577 return 0; 2578 2579 /* 2580 * If @dst_cgrp is already or can become a thread root or is 2581 * threaded, it doesn't matter. 2582 */ 2583 if (cgroup_can_be_thread_root(dst_cgrp) || cgroup_is_threaded(dst_cgrp)) 2584 return 0; 2585 2586 /* apply no-internal-process constraint */ 2587 if (dst_cgrp->subtree_control) 2588 return -EBUSY; 2589 2590 return 0; 2591 } 2592 2593 /** 2594 * cgroup_migrate_finish - cleanup after attach 2595 * @mgctx: migration context 2596 * 2597 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See 2598 * those functions for details. 
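 *
 * Note that this is safe to call even when preparation failed partway
 * through - whatever css_sets were preloaded are simply unlinked and
 * unpinned - which is why cgroup_attach_task() below invokes it
 * unconditionally.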
2599 */ 2600 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) 2601 { 2602 struct css_set *cset, *tmp_cset; 2603 2604 lockdep_assert_held(&cgroup_mutex); 2605 2606 spin_lock_irq(&css_set_lock); 2607 2608 list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets, 2609 mg_src_preload_node) { 2610 cset->mg_src_cgrp = NULL; 2611 cset->mg_dst_cgrp = NULL; 2612 cset->mg_dst_cset = NULL; 2613 list_del_init(&cset->mg_src_preload_node); 2614 put_css_set_locked(cset); 2615 } 2616 2617 list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets, 2618 mg_dst_preload_node) { 2619 cset->mg_src_cgrp = NULL; 2620 cset->mg_dst_cgrp = NULL; 2621 cset->mg_dst_cset = NULL; 2622 list_del_init(&cset->mg_dst_preload_node); 2623 put_css_set_locked(cset); 2624 } 2625 2626 spin_unlock_irq(&css_set_lock); 2627 } 2628 2629 /** 2630 * cgroup_migrate_add_src - add a migration source css_set 2631 * @src_cset: the source css_set to add 2632 * @dst_cgrp: the destination cgroup 2633 * @mgctx: migration context 2634 * 2635 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin 2636 * @src_cset and add it to @mgctx->preloaded_src_csets, which should later 2637 * be cleaned up by cgroup_migrate_finish(). 2638 * 2639 * This function may be called without holding cgroup_threadgroup_rwsem 2640 * even if the target is a process. Threads may be created and destroyed 2641 * but as long as cgroup_mutex is not dropped, no new css_set can be put 2642 * into play and the preloaded css_sets are guaranteed to cover all 2643 * migrations. 2644 */ 2645 void cgroup_migrate_add_src(struct css_set *src_cset, 2646 struct cgroup *dst_cgrp, 2647 struct cgroup_mgctx *mgctx) 2648 { 2649 struct cgroup *src_cgrp; 2650 2651 lockdep_assert_held(&cgroup_mutex); 2652 lockdep_assert_held(&css_set_lock); 2653 2654 /* 2655 * If ->dead, @src_cset is associated with one or more dead cgroups 2656 * and doesn't contain any migratable tasks. Ignore it early so 2657 * that the rest of the migration path doesn't get confused by it. 2658 */ 2659 if (src_cset->dead) 2660 return; 2661 2662 if (!list_empty(&src_cset->mg_src_preload_node)) 2663 return; 2664 2665 src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); 2666 2667 WARN_ON(src_cset->mg_src_cgrp); 2668 WARN_ON(src_cset->mg_dst_cgrp); 2669 WARN_ON(!list_empty(&src_cset->mg_tasks)); 2670 WARN_ON(!list_empty(&src_cset->mg_node)); 2671 2672 src_cset->mg_src_cgrp = src_cgrp; 2673 src_cset->mg_dst_cgrp = dst_cgrp; 2674 get_css_set(src_cset); 2675 list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets); 2676 } 2677 2678 /** 2679 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration 2680 * @mgctx: migration context 2681 * 2682 * Tasks are about to be moved and all the source css_sets have been 2683 * preloaded to @mgctx->preloaded_src_csets. This function looks up and 2684 * pins all destination css_sets, links each to its source, and appends them 2685 * to @mgctx->preloaded_dst_csets. 2686 * 2687 * This function must be called after cgroup_migrate_add_src() has been 2688 * called on each migration source css_set. After migration is performed 2689 * using cgroup_migrate(), cgroup_migrate_finish() must be called on 2690 * @mgctx.
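 *
 * A sketch of the whole sequence for a single task, modeled on
 * cgroup_attach_task() below (cgroup_mutex held; locking and error
 * handling abbreviated):
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(task, false, &mgctx);
 *	cgroup_migrate_finish(&mgctx);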
2691 */ 2692 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) 2693 { 2694 struct css_set *src_cset, *tmp_cset; 2695 2696 lockdep_assert_held(&cgroup_mutex); 2697 2698 /* look up the dst cset for each src cset and link it to src */ 2699 list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, 2700 mg_src_preload_node) { 2701 struct css_set *dst_cset; 2702 struct cgroup_subsys *ss; 2703 int ssid; 2704 2705 dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); 2706 if (!dst_cset) 2707 return -ENOMEM; 2708 2709 WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); 2710 2711 /* 2712 * If src cset equals dst, it's a noop. Drop the src. 2713 * cgroup_migrate() will skip the cset too. Note that we 2714 * can't handle src == dst as some nodes are used by both. 2715 */ 2716 if (src_cset == dst_cset) { 2717 src_cset->mg_src_cgrp = NULL; 2718 src_cset->mg_dst_cgrp = NULL; 2719 list_del_init(&src_cset->mg_src_preload_node); 2720 put_css_set(src_cset); 2721 put_css_set(dst_cset); 2722 continue; 2723 } 2724 2725 src_cset->mg_dst_cset = dst_cset; 2726 2727 if (list_empty(&dst_cset->mg_dst_preload_node)) 2728 list_add_tail(&dst_cset->mg_dst_preload_node, 2729 &mgctx->preloaded_dst_csets); 2730 else 2731 put_css_set(dst_cset); 2732 2733 for_each_subsys(ss, ssid) 2734 if (src_cset->subsys[ssid] != dst_cset->subsys[ssid]) 2735 mgctx->ss_mask |= 1 << ssid; 2736 } 2737 2738 return 0; 2739 } 2740 2741 /** 2742 * cgroup_migrate - migrate a process or task to a cgroup 2743 * @leader: the leader of the process or the task to migrate 2744 * @threadgroup: whether @leader points to the whole process or a single task 2745 * @mgctx: migration context 2746 * 2747 * Migrate a process or task denoted by @leader. If migrating a process, 2748 * the caller must be holding cgroup_threadgroup_rwsem. The caller is also 2749 * responsible for invoking cgroup_migrate_add_src() and 2750 * cgroup_migrate_prepare_dst() on the targets before invoking this 2751 * function and following up with cgroup_migrate_finish(). 2752 * 2753 * As long as a controller's ->can_attach() doesn't fail, this function is 2754 * guaranteed to succeed. This means that, excluding ->can_attach() 2755 * failure, when migrating multiple targets, the success or failure can be 2756 * decided for all targets by invoking cgroup_migrate_prepare_dst() before 2757 * actually starting the migration. 2758 */ 2759 int cgroup_migrate(struct task_struct *leader, bool threadgroup, 2760 struct cgroup_mgctx *mgctx) 2761 { 2762 struct task_struct *task; 2763 2764 /* 2765 * Prevent freeing of tasks while we take a snapshot. Tasks that are 2766 * already PF_EXITING could be freed from underneath us unless we 2767 * take an rcu_read_lock. 2768 */ 2769 spin_lock_irq(&css_set_lock); 2770 rcu_read_lock(); 2771 task = leader; 2772 do { 2773 cgroup_migrate_add_task(task, mgctx); 2774 if (!threadgroup) 2775 break; 2776 } while_each_thread(leader, task); 2777 rcu_read_unlock(); 2778 spin_unlock_irq(&css_set_lock); 2779 2780 return cgroup_migrate_execute(mgctx); 2781 } 2782 2783 /** 2784 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup 2785 * @dst_cgrp: the cgroup to attach to 2786 * @leader: the task or the leader of the threadgroup to be attached 2787 * @threadgroup: attach the whole threadgroup? 2788 * 2789 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
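 *
 * Roughly how the cgroup.procs write path drives this function (a
 * sketch; cgroup_procs_write_start/finish() below handle task lookup,
 * pinning and the threadgroup locking):
 *
 *	task = cgroup_procs_write_start(buf, true, &locked);
 *	if (!IS_ERR(task)) {
 *		ret = cgroup_attach_task(dst_cgrp, task, true);
 *		cgroup_procs_write_finish(task, locked);
 *	}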
2790 */ 2791 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, 2792 bool threadgroup) 2793 { 2794 DEFINE_CGROUP_MGCTX(mgctx); 2795 struct task_struct *task; 2796 int ret = 0; 2797 2798 /* look up all src csets */ 2799 spin_lock_irq(&css_set_lock); 2800 rcu_read_lock(); 2801 task = leader; 2802 do { 2803 cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx); 2804 if (!threadgroup) 2805 break; 2806 } while_each_thread(leader, task); 2807 rcu_read_unlock(); 2808 spin_unlock_irq(&css_set_lock); 2809 2810 /* prepare dst csets and commit */ 2811 ret = cgroup_migrate_prepare_dst(&mgctx); 2812 if (!ret) 2813 ret = cgroup_migrate(leader, threadgroup, &mgctx); 2814 2815 cgroup_migrate_finish(&mgctx); 2816 2817 if (!ret) 2818 TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup); 2819 2820 return ret; 2821 } 2822 2823 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup, 2824 bool *locked) 2825 __acquires(&cgroup_threadgroup_rwsem) 2826 { 2827 struct task_struct *tsk; 2828 pid_t pid; 2829 2830 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 2831 return ERR_PTR(-EINVAL); 2832 2833 /* 2834 * If we migrate a single thread, we don't care about threadgroup 2835 * stability. If the thread is `current`, it won't exit(2) under our 2836 * hands or change PID through exec(2). We exclude 2837 * cgroup_update_dfl_csses and other cgroup_{proc,thread}s_write 2838 * callers by cgroup_mutex. 2839 * Therefore, we can skip the global lock. 2840 */ 2841 lockdep_assert_held(&cgroup_mutex); 2842 if (pid || threadgroup) { 2843 percpu_down_write(&cgroup_threadgroup_rwsem); 2844 *locked = true; 2845 } else { 2846 *locked = false; 2847 } 2848 2849 rcu_read_lock(); 2850 if (pid) { 2851 tsk = find_task_by_vpid(pid); 2852 if (!tsk) { 2853 tsk = ERR_PTR(-ESRCH); 2854 goto out_unlock_threadgroup; 2855 } 2856 } else { 2857 tsk = current; 2858 } 2859 2860 if (threadgroup) 2861 tsk = tsk->group_leader; 2862 2863 /* 2864 * kthreads may acquire PF_NO_SETAFFINITY during initialization. 2865 * If userland migrates such a kthread to a non-root cgroup, it can 2866 * become trapped in a cpuset, or RT kthread may be born in a 2867 * cgroup with no rt_runtime allocated. Just say no. 
2868 */ 2869 if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { 2870 tsk = ERR_PTR(-EINVAL); 2871 goto out_unlock_threadgroup; 2872 } 2873 2874 get_task_struct(tsk); 2875 goto out_unlock_rcu; 2876 2877 out_unlock_threadgroup: 2878 if (*locked) { 2879 percpu_up_write(&cgroup_threadgroup_rwsem); 2880 *locked = false; 2881 } 2882 out_unlock_rcu: 2883 rcu_read_unlock(); 2884 return tsk; 2885 } 2886 2887 void cgroup_procs_write_finish(struct task_struct *task, bool locked) 2888 __releases(&cgroup_threadgroup_rwsem) 2889 { 2890 struct cgroup_subsys *ss; 2891 int ssid; 2892 2893 /* release reference from cgroup_procs_write_start() */ 2894 put_task_struct(task); 2895 2896 if (locked) 2897 percpu_up_write(&cgroup_threadgroup_rwsem); 2898 for_each_subsys(ss, ssid) 2899 if (ss->post_attach) 2900 ss->post_attach(); 2901 } 2902 2903 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask) 2904 { 2905 struct cgroup_subsys *ss; 2906 bool printed = false; 2907 int ssid; 2908 2909 do_each_subsys_mask(ss, ssid, ss_mask) { 2910 if (printed) 2911 seq_putc(seq, ' '); 2912 seq_puts(seq, ss->name); 2913 printed = true; 2914 } while_each_subsys_mask(); 2915 if (printed) 2916 seq_putc(seq, '\n'); 2917 } 2918 2919 /* show controllers which are enabled from the parent */ 2920 static int cgroup_controllers_show(struct seq_file *seq, void *v) 2921 { 2922 struct cgroup *cgrp = seq_css(seq)->cgroup; 2923 2924 cgroup_print_ss_mask(seq, cgroup_control(cgrp)); 2925 return 0; 2926 } 2927 2928 /* show controllers which are enabled for a given cgroup's children */ 2929 static int cgroup_subtree_control_show(struct seq_file *seq, void *v) 2930 { 2931 struct cgroup *cgrp = seq_css(seq)->cgroup; 2932 2933 cgroup_print_ss_mask(seq, cgrp->subtree_control); 2934 return 0; 2935 } 2936 2937 /** 2938 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy 2939 * @cgrp: root of the subtree to update csses for 2940 * 2941 * @cgrp's control masks have changed and its subtree's css associations 2942 * need to be updated accordingly. This function looks up all css_sets 2943 * which are attached to the subtree, creates the matching updated css_sets 2944 * and migrates the tasks to the new ones. 
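 *
 * This is the mechanism which makes a subtree_control change (e.g.
 * writing "+memory" to cgroup.subtree_control) take effect for tasks
 * already living in the subtree; it is invoked from
 * cgroup_apply_control() below.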
2945 */ 2946 static int cgroup_update_dfl_csses(struct cgroup *cgrp) 2947 { 2948 DEFINE_CGROUP_MGCTX(mgctx); 2949 struct cgroup_subsys_state *d_css; 2950 struct cgroup *dsct; 2951 struct css_set *src_cset; 2952 int ret; 2953 2954 lockdep_assert_held(&cgroup_mutex); 2955 2956 percpu_down_write(&cgroup_threadgroup_rwsem); 2957 2958 /* look up all csses currently attached to @cgrp's subtree */ 2959 spin_lock_irq(&css_set_lock); 2960 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 2961 struct cgrp_cset_link *link; 2962 2963 list_for_each_entry(link, &dsct->cset_links, cset_link) 2964 cgroup_migrate_add_src(link->cset, dsct, &mgctx); 2965 } 2966 spin_unlock_irq(&css_set_lock); 2967 2968 /* NULL dst indicates self on default hierarchy */ 2969 ret = cgroup_migrate_prepare_dst(&mgctx); 2970 if (ret) 2971 goto out_finish; 2972 2973 spin_lock_irq(&css_set_lock); 2974 list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, 2975 mg_src_preload_node) { 2976 struct task_struct *task, *ntask; 2977 2978 /* all tasks in src_csets need to be migrated */ 2979 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list) 2980 cgroup_migrate_add_task(task, &mgctx); 2981 } 2982 spin_unlock_irq(&css_set_lock); 2983 2984 ret = cgroup_migrate_execute(&mgctx); 2985 out_finish: 2986 cgroup_migrate_finish(&mgctx); 2987 percpu_up_write(&cgroup_threadgroup_rwsem); 2988 return ret; 2989 } 2990 2991 /** 2992 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses 2993 * @cgrp: root of the target subtree 2994 * 2995 * Because css offlining is asynchronous, userland may try to re-enable a 2996 * controller while the previous css is still around. This function grabs 2997 * cgroup_mutex and drains the previous css instances of @cgrp's subtree. 2998 */ 2999 void cgroup_lock_and_drain_offline(struct cgroup *cgrp) 3000 __acquires(&cgroup_mutex) 3001 { 3002 struct cgroup *dsct; 3003 struct cgroup_subsys_state *d_css; 3004 struct cgroup_subsys *ss; 3005 int ssid; 3006 3007 restart: 3008 mutex_lock(&cgroup_mutex); 3009 3010 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 3011 for_each_subsys(ss, ssid) { 3012 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 3013 DEFINE_WAIT(wait); 3014 3015 if (!css || !percpu_ref_is_dying(&css->refcnt)) 3016 continue; 3017 3018 cgroup_get_live(dsct); 3019 prepare_to_wait(&dsct->offline_waitq, &wait, 3020 TASK_UNINTERRUPTIBLE); 3021 3022 mutex_unlock(&cgroup_mutex); 3023 schedule(); 3024 finish_wait(&dsct->offline_waitq, &wait); 3025 3026 cgroup_put(dsct); 3027 goto restart; 3028 } 3029 } 3030 } 3031 3032 /** 3033 * cgroup_save_control - save control masks and dom_cgrp of a subtree 3034 * @cgrp: root of the target subtree 3035 * 3036 * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the 3037 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 3038 * itself. 
3039 */ 3040 static void cgroup_save_control(struct cgroup *cgrp) 3041 { 3042 struct cgroup *dsct; 3043 struct cgroup_subsys_state *d_css; 3044 3045 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3046 dsct->old_subtree_control = dsct->subtree_control; 3047 dsct->old_subtree_ss_mask = dsct->subtree_ss_mask; 3048 dsct->old_dom_cgrp = dsct->dom_cgrp; 3049 } 3050 } 3051 3052 /** 3053 * cgroup_propagate_control - refresh control masks of a subtree 3054 * @cgrp: root of the target subtree 3055 * 3056 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches 3057 * ->subtree_control and propagate controller availability through the 3058 * subtree so that descendants don't have unavailable controllers enabled. 3059 */ 3060 static void cgroup_propagate_control(struct cgroup *cgrp) 3061 { 3062 struct cgroup *dsct; 3063 struct cgroup_subsys_state *d_css; 3064 3065 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3066 dsct->subtree_control &= cgroup_control(dsct); 3067 dsct->subtree_ss_mask = 3068 cgroup_calc_subtree_ss_mask(dsct->subtree_control, 3069 cgroup_ss_mask(dsct)); 3070 } 3071 } 3072 3073 /** 3074 * cgroup_restore_control - restore control masks and dom_cgrp of a subtree 3075 * @cgrp: root of the target subtree 3076 * 3077 * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the 3078 * respective old_ prefixed fields for @cgrp's subtree including @cgrp 3079 * itself. 3080 */ 3081 static void cgroup_restore_control(struct cgroup *cgrp) 3082 { 3083 struct cgroup *dsct; 3084 struct cgroup_subsys_state *d_css; 3085 3086 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 3087 dsct->subtree_control = dsct->old_subtree_control; 3088 dsct->subtree_ss_mask = dsct->old_subtree_ss_mask; 3089 dsct->dom_cgrp = dsct->old_dom_cgrp; 3090 } 3091 } 3092 3093 static bool css_visible(struct cgroup_subsys_state *css) 3094 { 3095 struct cgroup_subsys *ss = css->ss; 3096 struct cgroup *cgrp = css->cgroup; 3097 3098 if (cgroup_control(cgrp) & (1 << ss->id)) 3099 return true; 3100 if (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) 3101 return false; 3102 return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl; 3103 } 3104 3105 /** 3106 * cgroup_apply_control_enable - enable or show csses according to control 3107 * @cgrp: root of the target subtree 3108 * 3109 * Walk @cgrp's subtree and create new csses or make the existing ones 3110 * visible. A css is created invisible if it's being implicitly enabled 3111 * through dependency. An invisible css is made visible when the userland 3112 * explicitly enables it. 3113 * 3114 * Returns 0 on success, -errno on failure. On failure, csses which have 3115 * been processed already aren't cleaned up. The caller is responsible for 3116 * cleaning up with cgroup_apply_control_disable(). 
3117 */ 3118 static int cgroup_apply_control_enable(struct cgroup *cgrp) 3119 { 3120 struct cgroup *dsct; 3121 struct cgroup_subsys_state *d_css; 3122 struct cgroup_subsys *ss; 3123 int ssid, ret; 3124 3125 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) { 3126 for_each_subsys(ss, ssid) { 3127 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 3128 3129 if (!(cgroup_ss_mask(dsct) & (1 << ss->id))) 3130 continue; 3131 3132 if (!css) { 3133 css = css_create(dsct, ss); 3134 if (IS_ERR(css)) 3135 return PTR_ERR(css); 3136 } 3137 3138 WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); 3139 3140 if (css_visible(css)) { 3141 ret = css_populate_dir(css); 3142 if (ret) 3143 return ret; 3144 } 3145 } 3146 } 3147 3148 return 0; 3149 } 3150 3151 /** 3152 * cgroup_apply_control_disable - kill or hide csses according to control 3153 * @cgrp: root of the target subtree 3154 * 3155 * Walk @cgrp's subtree and kill and hide csses so that they match 3156 * cgroup_ss_mask() and css_visible(). 3157 * 3158 * A css is hidden when userland requests it to be disabled while other 3159 * subsystems are still depending on it. A hidden css must not actively 3160 * control resources and must be in the vanilla state if it's made visible 3161 * again later. Controllers which may be depended upon should provide 3162 * ->css_reset() for this purpose. 3163 */ 3164 static void cgroup_apply_control_disable(struct cgroup *cgrp) 3165 { 3166 struct cgroup *dsct; 3167 struct cgroup_subsys_state *d_css; 3168 struct cgroup_subsys *ss; 3169 int ssid; 3170 3171 cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) { 3172 for_each_subsys(ss, ssid) { 3173 struct cgroup_subsys_state *css = cgroup_css(dsct, ss); 3174 3175 if (!css) 3176 continue; 3177 3178 WARN_ON_ONCE(percpu_ref_is_dying(&css->refcnt)); 3179 3180 if (css->parent && 3181 !(cgroup_ss_mask(dsct) & (1 << ss->id))) { 3182 kill_css(css); 3183 } else if (!css_visible(css)) { 3184 css_clear_dir(css); 3185 if (ss->css_reset) 3186 ss->css_reset(css); 3187 } 3188 } 3189 } 3190 } 3191 3192 /** 3193 * cgroup_apply_control - apply control mask updates to the subtree 3194 * @cgrp: root of the target subtree 3195 * 3196 * Subsystems can be enabled and disabled in a subtree using the following 3197 * steps: 3198 * 3199 * 1. Call cgroup_save_control() to stash the current state. 3200 * 2. Update ->subtree_control masks in the subtree as desired. 3201 * 3. Call cgroup_apply_control() to apply the changes. 3202 * 4. Optionally perform other related operations. 3203 * 5. Call cgroup_finalize_control() to finish up. 3204 * 3205 * This function implements step 3 and propagates the mask changes 3206 * throughout @cgrp's subtree, updates csses accordingly and performs 3207 * process migrations. 3208 */ 3209 static int cgroup_apply_control(struct cgroup *cgrp) 3210 { 3211 int ret; 3212 3213 cgroup_propagate_control(cgrp); 3214 3215 ret = cgroup_apply_control_enable(cgrp); 3216 if (ret) 3217 return ret; 3218 3219 /* 3220 * At this point, cgroup_e_css_by_mask() results reflect the new csses 3221 * making the following cgroup_update_dfl_csses() properly update 3222 * css associations of all tasks in the subtree. 3223 */ 3224 ret = cgroup_update_dfl_csses(cgrp); 3225 if (ret) 3226 return ret; 3227 3228 return 0; 3229 } 3230 3231 /** 3232 * cgroup_finalize_control - finalize control mask update 3233 * @cgrp: root of the target subtree 3234 * @ret: the result of the update 3235 * 3236 * Finalize control mask update. See cgroup_apply_control() for more info.
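 *
 * Putting the pieces together, a control mask update has the following
 * shape (cf. cgroup_subtree_control_write() below):
 *
 *	cgroup_save_control(cgrp);
 *
 *	cgrp->subtree_control |= enable;
 *	cgrp->subtree_control &= ~disable;
 *
 *	ret = cgroup_apply_control(cgrp);
 *	cgroup_finalize_control(cgrp, ret);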
3237 */ 3238 static void cgroup_finalize_control(struct cgroup *cgrp, int ret) 3239 { 3240 if (ret) { 3241 cgroup_restore_control(cgrp); 3242 cgroup_propagate_control(cgrp); 3243 } 3244 3245 cgroup_apply_control_disable(cgrp); 3246 } 3247 3248 static int cgroup_vet_subtree_control_enable(struct cgroup *cgrp, u16 enable) 3249 { 3250 u16 domain_enable = enable & ~cgrp_dfl_threaded_ss_mask; 3251 3252 /* if nothing is getting enabled, nothing to worry about */ 3253 if (!enable) 3254 return 0; 3255 3256 /* can @cgrp host any resources? */ 3257 if (!cgroup_is_valid_domain(cgrp->dom_cgrp)) 3258 return -EOPNOTSUPP; 3259 3260 /* mixables don't care */ 3261 if (cgroup_is_mixable(cgrp)) 3262 return 0; 3263 3264 if (domain_enable) { 3265 /* can't enable domain controllers inside a thread subtree */ 3266 if (cgroup_is_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3267 return -EOPNOTSUPP; 3268 } else { 3269 /* 3270 * Threaded controllers can handle internal competitions 3271 * and are always allowed inside a (prospective) thread 3272 * subtree. 3273 */ 3274 if (cgroup_can_be_thread_root(cgrp) || cgroup_is_threaded(cgrp)) 3275 return 0; 3276 } 3277 3278 /* 3279 * Controllers can't be enabled for a cgroup with tasks to avoid 3280 * child cgroups competing against tasks. 3281 */ 3282 if (cgroup_has_tasks(cgrp)) 3283 return -EBUSY; 3284 3285 return 0; 3286 } 3287 3288 /* change the enabled child controllers for a cgroup in the default hierarchy */ 3289 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, 3290 char *buf, size_t nbytes, 3291 loff_t off) 3292 { 3293 u16 enable = 0, disable = 0; 3294 struct cgroup *cgrp, *child; 3295 struct cgroup_subsys *ss; 3296 char *tok; 3297 int ssid, ret; 3298 3299 /* 3300 * Parse input - space separated list of subsystem names prefixed 3301 * with either + or -. 3302 */ 3303 buf = strstrip(buf); 3304 while ((tok = strsep(&buf, " "))) { 3305 if (tok[0] == '\0') 3306 continue; 3307 do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) { 3308 if (!cgroup_ssid_enabled(ssid) || 3309 strcmp(tok + 1, ss->name)) 3310 continue; 3311 3312 if (*tok == '+') { 3313 enable |= 1 << ssid; 3314 disable &= ~(1 << ssid); 3315 } else if (*tok == '-') { 3316 disable |= 1 << ssid; 3317 enable &= ~(1 << ssid); 3318 } else { 3319 return -EINVAL; 3320 } 3321 break; 3322 } while_each_subsys_mask(); 3323 if (ssid == CGROUP_SUBSYS_COUNT) 3324 return -EINVAL; 3325 } 3326 3327 cgrp = cgroup_kn_lock_live(of->kn, true); 3328 if (!cgrp) 3329 return -ENODEV; 3330 3331 for_each_subsys(ss, ssid) { 3332 if (enable & (1 << ssid)) { 3333 if (cgrp->subtree_control & (1 << ssid)) { 3334 enable &= ~(1 << ssid); 3335 continue; 3336 } 3337 3338 if (!(cgroup_control(cgrp) & (1 << ssid))) { 3339 ret = -ENOENT; 3340 goto out_unlock; 3341 } 3342 } else if (disable & (1 << ssid)) { 3343 if (!(cgrp->subtree_control & (1 << ssid))) { 3344 disable &= ~(1 << ssid); 3345 continue; 3346 } 3347 3348 /* a child has it enabled? 
*/ 3349 cgroup_for_each_live_child(child, cgrp) { 3350 if (child->subtree_control & (1 << ssid)) { 3351 ret = -EBUSY; 3352 goto out_unlock; 3353 } 3354 } 3355 } 3356 } 3357 3358 if (!enable && !disable) { 3359 ret = 0; 3360 goto out_unlock; 3361 } 3362 3363 ret = cgroup_vet_subtree_control_enable(cgrp, enable); 3364 if (ret) 3365 goto out_unlock; 3366 3367 /* save and update control masks and prepare csses */ 3368 cgroup_save_control(cgrp); 3369 3370 cgrp->subtree_control |= enable; 3371 cgrp->subtree_control &= ~disable; 3372 3373 ret = cgroup_apply_control(cgrp); 3374 cgroup_finalize_control(cgrp, ret); 3375 if (ret) 3376 goto out_unlock; 3377 3378 kernfs_activate(cgrp->kn); 3379 out_unlock: 3380 cgroup_kn_unlock(of->kn); 3381 return ret ?: nbytes; 3382 } 3383 3384 /** 3385 * cgroup_enable_threaded - make @cgrp threaded 3386 * @cgrp: the target cgroup 3387 * 3388 * Called when "threaded" is written to the cgroup.type interface file and 3389 * tries to make @cgrp threaded and join the parent's resource domain. 3390 * This function is never called on the root cgroup as cgroup.type doesn't 3391 * exist on it. 3392 */ 3393 static int cgroup_enable_threaded(struct cgroup *cgrp) 3394 { 3395 struct cgroup *parent = cgroup_parent(cgrp); 3396 struct cgroup *dom_cgrp = parent->dom_cgrp; 3397 struct cgroup *dsct; 3398 struct cgroup_subsys_state *d_css; 3399 int ret; 3400 3401 lockdep_assert_held(&cgroup_mutex); 3402 3403 /* noop if already threaded */ 3404 if (cgroup_is_threaded(cgrp)) 3405 return 0; 3406 3407 /* 3408 * If @cgrp is populated or has domain controllers enabled, it 3409 * can't be switched. While the cgroup_can_be_thread_root() test 3410 * below can catch the same conditions, that's only when @parent is 3411 * not mixable, so let's check it explicitly. 3412 */ 3413 if (cgroup_is_populated(cgrp) || 3414 cgrp->subtree_control & ~cgrp_dfl_threaded_ss_mask) 3415 return -EOPNOTSUPP; 3416 3417 /* we're joining the parent's domain, ensure its validity */ 3418 if (!cgroup_is_valid_domain(dom_cgrp) || 3419 !cgroup_can_be_thread_root(dom_cgrp)) 3420 return -EOPNOTSUPP; 3421 3422 /* 3423 * The following shouldn't cause actual migrations and should 3424 * always succeed.
3425 */ 3426 cgroup_save_control(cgrp); 3427 3428 cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) 3429 if (dsct == cgrp || cgroup_is_threaded(dsct)) 3430 dsct->dom_cgrp = dom_cgrp; 3431 3432 ret = cgroup_apply_control(cgrp); 3433 if (!ret) 3434 parent->nr_threaded_children++; 3435 3436 cgroup_finalize_control(cgrp, ret); 3437 return ret; 3438 } 3439 3440 static int cgroup_type_show(struct seq_file *seq, void *v) 3441 { 3442 struct cgroup *cgrp = seq_css(seq)->cgroup; 3443 3444 if (cgroup_is_threaded(cgrp)) 3445 seq_puts(seq, "threaded\n"); 3446 else if (!cgroup_is_valid_domain(cgrp)) 3447 seq_puts(seq, "domain invalid\n"); 3448 else if (cgroup_is_thread_root(cgrp)) 3449 seq_puts(seq, "domain threaded\n"); 3450 else 3451 seq_puts(seq, "domain\n"); 3452 3453 return 0; 3454 } 3455 3456 static ssize_t cgroup_type_write(struct kernfs_open_file *of, char *buf, 3457 size_t nbytes, loff_t off) 3458 { 3459 struct cgroup *cgrp; 3460 int ret; 3461 3462 /* only switching to threaded mode is supported */ 3463 if (strcmp(strstrip(buf), "threaded")) 3464 return -EINVAL; 3465 3466 /* drain dying csses before we re-apply (threaded) subtree control */ 3467 cgrp = cgroup_kn_lock_live(of->kn, true); 3468 if (!cgrp) 3469 return -ENOENT; 3470 3471 /* threaded can only be enabled */ 3472 ret = cgroup_enable_threaded(cgrp); 3473 3474 cgroup_kn_unlock(of->kn); 3475 return ret ?: nbytes; 3476 } 3477 3478 static int cgroup_max_descendants_show(struct seq_file *seq, void *v) 3479 { 3480 struct cgroup *cgrp = seq_css(seq)->cgroup; 3481 int descendants = READ_ONCE(cgrp->max_descendants); 3482 3483 if (descendants == INT_MAX) 3484 seq_puts(seq, "max\n"); 3485 else 3486 seq_printf(seq, "%d\n", descendants); 3487 3488 return 0; 3489 } 3490 3491 static ssize_t cgroup_max_descendants_write(struct kernfs_open_file *of, 3492 char *buf, size_t nbytes, loff_t off) 3493 { 3494 struct cgroup *cgrp; 3495 int descendants; 3496 ssize_t ret; 3497 3498 buf = strstrip(buf); 3499 if (!strcmp(buf, "max")) { 3500 descendants = INT_MAX; 3501 } else { 3502 ret = kstrtoint(buf, 0, &descendants); 3503 if (ret) 3504 return ret; 3505 } 3506 3507 if (descendants < 0) 3508 return -ERANGE; 3509 3510 cgrp = cgroup_kn_lock_live(of->kn, false); 3511 if (!cgrp) 3512 return -ENOENT; 3513 3514 cgrp->max_descendants = descendants; 3515 3516 cgroup_kn_unlock(of->kn); 3517 3518 return nbytes; 3519 } 3520 3521 static int cgroup_max_depth_show(struct seq_file *seq, void *v) 3522 { 3523 struct cgroup *cgrp = seq_css(seq)->cgroup; 3524 int depth = READ_ONCE(cgrp->max_depth); 3525 3526 if (depth == INT_MAX) 3527 seq_puts(seq, "max\n"); 3528 else 3529 seq_printf(seq, "%d\n", depth); 3530 3531 return 0; 3532 } 3533 3534 static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, 3535 char *buf, size_t nbytes, loff_t off) 3536 { 3537 struct cgroup *cgrp; 3538 ssize_t ret; 3539 int depth; 3540 3541 buf = strstrip(buf); 3542 if (!strcmp(buf, "max")) { 3543 depth = INT_MAX; 3544 } else { 3545 ret = kstrtoint(buf, 0, &depth); 3546 if (ret) 3547 return ret; 3548 } 3549 3550 if (depth < 0) 3551 return -ERANGE; 3552 3553 cgrp = cgroup_kn_lock_live(of->kn, false); 3554 if (!cgrp) 3555 return -ENOENT; 3556 3557 cgrp->max_depth = depth; 3558 3559 cgroup_kn_unlock(of->kn); 3560 3561 return nbytes; 3562 } 3563 3564 static int cgroup_events_show(struct seq_file *seq, void *v) 3565 { 3566 struct cgroup *cgrp = seq_css(seq)->cgroup; 3567 3568 seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp)); 3569 seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, 
&cgrp->flags)); 3570 3571 return 0; 3572 } 3573 3574 static int cgroup_stat_show(struct seq_file *seq, void *v) 3575 { 3576 struct cgroup *cgroup = seq_css(seq)->cgroup; 3577 3578 seq_printf(seq, "nr_descendants %d\n", 3579 cgroup->nr_descendants); 3580 seq_printf(seq, "nr_dying_descendants %d\n", 3581 cgroup->nr_dying_descendants); 3582 3583 return 0; 3584 } 3585 3586 static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq, 3587 struct cgroup *cgrp, int ssid) 3588 { 3589 struct cgroup_subsys *ss = cgroup_subsys[ssid]; 3590 struct cgroup_subsys_state *css; 3591 int ret; 3592 3593 if (!ss->css_extra_stat_show) 3594 return 0; 3595 3596 css = cgroup_tryget_css(cgrp, ss); 3597 if (!css) 3598 return 0; 3599 3600 ret = ss->css_extra_stat_show(seq, css); 3601 css_put(css); 3602 return ret; 3603 } 3604 3605 static int cpu_stat_show(struct seq_file *seq, void *v) 3606 { 3607 struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup; 3608 int ret = 0; 3609 3610 cgroup_base_stat_cputime_show(seq); 3611 #ifdef CONFIG_CGROUP_SCHED 3612 ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id); 3613 #endif 3614 return ret; 3615 } 3616 3617 #ifdef CONFIG_PSI 3618 static int cgroup_io_pressure_show(struct seq_file *seq, void *v) 3619 { 3620 struct cgroup *cgrp = seq_css(seq)->cgroup; 3621 struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi; 3622 3623 return psi_show(seq, psi, PSI_IO); 3624 } 3625 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v) 3626 { 3627 struct cgroup *cgrp = seq_css(seq)->cgroup; 3628 struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi; 3629 3630 return psi_show(seq, psi, PSI_MEM); 3631 } 3632 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v) 3633 { 3634 struct cgroup *cgrp = seq_css(seq)->cgroup; 3635 struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi; 3636 3637 return psi_show(seq, psi, PSI_CPU); 3638 } 3639 3640 static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, 3641 size_t nbytes, enum psi_res res) 3642 { 3643 struct cgroup_file_ctx *ctx = of->priv; 3644 struct psi_trigger *new; 3645 struct cgroup *cgrp; 3646 struct psi_group *psi; 3647 3648 cgrp = cgroup_kn_lock_live(of->kn, false); 3649 if (!cgrp) 3650 return -ENODEV; 3651 3652 cgroup_get(cgrp); 3653 cgroup_kn_unlock(of->kn); 3654 3655 /* Allow only one trigger per file descriptor */ 3656 if (ctx->psi.trigger) { 3657 cgroup_put(cgrp); 3658 return -EBUSY; 3659 } 3660 3661 psi = cgroup_ino(cgrp) == 1 ? 
&psi_system : &cgrp->psi; 3662 new = psi_trigger_create(psi, buf, nbytes, res); 3663 if (IS_ERR(new)) { 3664 cgroup_put(cgrp); 3665 return PTR_ERR(new); 3666 } 3667 3668 smp_store_release(&ctx->psi.trigger, new); 3669 cgroup_put(cgrp); 3670 3671 return nbytes; 3672 } 3673 3674 static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of, 3675 char *buf, size_t nbytes, 3676 loff_t off) 3677 { 3678 return cgroup_pressure_write(of, buf, nbytes, PSI_IO); 3679 } 3680 3681 static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of, 3682 char *buf, size_t nbytes, 3683 loff_t off) 3684 { 3685 return cgroup_pressure_write(of, buf, nbytes, PSI_MEM); 3686 } 3687 3688 static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of, 3689 char *buf, size_t nbytes, 3690 loff_t off) 3691 { 3692 return cgroup_pressure_write(of, buf, nbytes, PSI_CPU); 3693 } 3694 3695 static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of, 3696 poll_table *pt) 3697 { 3698 struct cgroup_file_ctx *ctx = of->priv; 3699 3700 return psi_trigger_poll(&ctx->psi.trigger, of->file, pt); 3701 } 3702 3703 static void cgroup_pressure_release(struct kernfs_open_file *of) 3704 { 3705 struct cgroup_file_ctx *ctx = of->priv; 3706 3707 psi_trigger_destroy(ctx->psi.trigger); 3708 } 3709 3710 bool cgroup_psi_enabled(void) 3711 { 3712 return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0; 3713 } 3714 3715 #else /* CONFIG_PSI */ 3716 bool cgroup_psi_enabled(void) 3717 { 3718 return false; 3719 } 3720 3721 #endif /* CONFIG_PSI */ 3722 3723 static int cgroup_freeze_show(struct seq_file *seq, void *v) 3724 { 3725 struct cgroup *cgrp = seq_css(seq)->cgroup; 3726 3727 seq_printf(seq, "%d\n", cgrp->freezer.freeze); 3728 3729 return 0; 3730 } 3731 3732 static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, 3733 char *buf, size_t nbytes, loff_t off) 3734 { 3735 struct cgroup *cgrp; 3736 ssize_t ret; 3737 int freeze; 3738 3739 ret = kstrtoint(strstrip(buf), 0, &freeze); 3740 if (ret) 3741 return ret; 3742 3743 if (freeze < 0 || freeze > 1) 3744 return -ERANGE; 3745 3746 cgrp = cgroup_kn_lock_live(of->kn, false); 3747 if (!cgrp) 3748 return -ENOENT; 3749 3750 cgroup_freeze(cgrp, freeze); 3751 3752 cgroup_kn_unlock(of->kn); 3753 3754 return nbytes; 3755 } 3756 3757 static void __cgroup_kill(struct cgroup *cgrp) 3758 { 3759 struct css_task_iter it; 3760 struct task_struct *task; 3761 3762 lockdep_assert_held(&cgroup_mutex); 3763 3764 spin_lock_irq(&css_set_lock); 3765 set_bit(CGRP_KILL, &cgrp->flags); 3766 spin_unlock_irq(&css_set_lock); 3767 3768 css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it); 3769 while ((task = css_task_iter_next(&it))) { 3770 /* Ignore kernel threads here. */ 3771 if (task->flags & PF_KTHREAD) 3772 continue; 3773 3774 /* Skip tasks that are already dying. 
*/ 3775 if (__fatal_signal_pending(task)) 3776 continue; 3777 3778 send_sig(SIGKILL, task, 0); 3779 } 3780 css_task_iter_end(&it); 3781 3782 spin_lock_irq(&css_set_lock); 3783 clear_bit(CGRP_KILL, &cgrp->flags); 3784 spin_unlock_irq(&css_set_lock); 3785 } 3786 3787 static void cgroup_kill(struct cgroup *cgrp) 3788 { 3789 struct cgroup_subsys_state *css; 3790 struct cgroup *dsct; 3791 3792 lockdep_assert_held(&cgroup_mutex); 3793 3794 cgroup_for_each_live_descendant_pre(dsct, css, cgrp) 3795 __cgroup_kill(dsct); 3796 } 3797 3798 static ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, 3799 size_t nbytes, loff_t off) 3800 { 3801 ssize_t ret = 0; 3802 int kill; 3803 struct cgroup *cgrp; 3804 3805 ret = kstrtoint(strstrip(buf), 0, &kill); 3806 if (ret) 3807 return ret; 3808 3809 if (kill != 1) 3810 return -ERANGE; 3811 3812 cgrp = cgroup_kn_lock_live(of->kn, false); 3813 if (!cgrp) 3814 return -ENOENT; 3815 3816 /* 3817 * Killing is a process-directed operation, i.e. the whole thread-group 3818 * is taken down so act like we do for cgroup.procs and only make this 3819 * writable in non-threaded cgroups. 3820 */ 3821 if (cgroup_is_threaded(cgrp)) 3822 ret = -EOPNOTSUPP; 3823 else 3824 cgroup_kill(cgrp); 3825 3826 cgroup_kn_unlock(of->kn); 3827 3828 return ret ?: nbytes; 3829 } 3830 3831 static int cgroup_file_open(struct kernfs_open_file *of) 3832 { 3833 struct cftype *cft = of_cft(of); 3834 struct cgroup_file_ctx *ctx; 3835 int ret; 3836 3837 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 3838 if (!ctx) 3839 return -ENOMEM; 3840 3841 ctx->ns = current->nsproxy->cgroup_ns; 3842 get_cgroup_ns(ctx->ns); 3843 of->priv = ctx; 3844 3845 if (!cft->open) 3846 return 0; 3847 3848 ret = cft->open(of); 3849 if (ret) { 3850 put_cgroup_ns(ctx->ns); 3851 kfree(ctx); 3852 } 3853 return ret; 3854 } 3855 3856 static void cgroup_file_release(struct kernfs_open_file *of) 3857 { 3858 struct cftype *cft = of_cft(of); 3859 struct cgroup_file_ctx *ctx = of->priv; 3860 3861 if (cft->release) 3862 cft->release(of); 3863 put_cgroup_ns(ctx->ns); 3864 kfree(ctx); 3865 } 3866 3867 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, 3868 size_t nbytes, loff_t off) 3869 { 3870 struct cgroup_file_ctx *ctx = of->priv; 3871 struct cgroup *cgrp = of->kn->parent->priv; 3872 struct cftype *cft = of_cft(of); 3873 struct cgroup_subsys_state *css; 3874 int ret; 3875 3876 if (!nbytes) 3877 return 0; 3878 3879 /* 3880 * If namespaces are delegation boundaries, disallow writes to 3881 * files in a non-init namespace root from inside the namespace 3882 * except for the files explicitly marked delegatable - 3883 * cgroup.procs and cgroup.subtree_control. 3884 */ 3885 if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) && 3886 !(cft->flags & CFTYPE_NS_DELEGATABLE) && 3887 ctx->ns != &init_cgroup_ns && ctx->ns->root_cset->dfl_cgrp == cgrp) 3888 return -EPERM; 3889 3890 if (cft->write) 3891 return cft->write(of, buf, nbytes, off); 3892 3893 /* 3894 * kernfs guarantees that a file isn't deleted with operations in 3895 * flight, which means that the matching css is and stays alive and 3896 * doesn't need to be pinned. The RCU locking is not necessary 3897 * either. It's just for the convenience of using cgroup_css().
3898 */ 3899 rcu_read_lock(); 3900 css = cgroup_css(cgrp, cft->ss); 3901 rcu_read_unlock(); 3902 3903 if (cft->write_u64) { 3904 unsigned long long v; 3905 ret = kstrtoull(buf, 0, &v); 3906 if (!ret) 3907 ret = cft->write_u64(css, cft, v); 3908 } else if (cft->write_s64) { 3909 long long v; 3910 ret = kstrtoll(buf, 0, &v); 3911 if (!ret) 3912 ret = cft->write_s64(css, cft, v); 3913 } else { 3914 ret = -EINVAL; 3915 } 3916 3917 return ret ?: nbytes; 3918 } 3919 3920 static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt) 3921 { 3922 struct cftype *cft = of_cft(of); 3923 3924 if (cft->poll) 3925 return cft->poll(of, pt); 3926 3927 return kernfs_generic_poll(of, pt); 3928 } 3929 3930 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) 3931 { 3932 return seq_cft(seq)->seq_start(seq, ppos); 3933 } 3934 3935 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) 3936 { 3937 return seq_cft(seq)->seq_next(seq, v, ppos); 3938 } 3939 3940 static void cgroup_seqfile_stop(struct seq_file *seq, void *v) 3941 { 3942 if (seq_cft(seq)->seq_stop) 3943 seq_cft(seq)->seq_stop(seq, v); 3944 } 3945 3946 static int cgroup_seqfile_show(struct seq_file *m, void *arg) 3947 { 3948 struct cftype *cft = seq_cft(m); 3949 struct cgroup_subsys_state *css = seq_css(m); 3950 3951 if (cft->seq_show) 3952 return cft->seq_show(m, arg); 3953 3954 if (cft->read_u64) 3955 seq_printf(m, "%llu\n", cft->read_u64(css, cft)); 3956 else if (cft->read_s64) 3957 seq_printf(m, "%lld\n", cft->read_s64(css, cft)); 3958 else 3959 return -EINVAL; 3960 return 0; 3961 } 3962 3963 static struct kernfs_ops cgroup_kf_single_ops = { 3964 .atomic_write_len = PAGE_SIZE, 3965 .open = cgroup_file_open, 3966 .release = cgroup_file_release, 3967 .write = cgroup_file_write, 3968 .poll = cgroup_file_poll, 3969 .seq_show = cgroup_seqfile_show, 3970 }; 3971 3972 static struct kernfs_ops cgroup_kf_ops = { 3973 .atomic_write_len = PAGE_SIZE, 3974 .open = cgroup_file_open, 3975 .release = cgroup_file_release, 3976 .write = cgroup_file_write, 3977 .poll = cgroup_file_poll, 3978 .seq_start = cgroup_seqfile_start, 3979 .seq_next = cgroup_seqfile_next, 3980 .seq_stop = cgroup_seqfile_stop, 3981 .seq_show = cgroup_seqfile_show, 3982 }; 3983 3984 /* set uid and gid of cgroup dirs and files to that of the creator */ 3985 static int cgroup_kn_set_ugid(struct kernfs_node *kn) 3986 { 3987 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 3988 .ia_uid = current_fsuid(), 3989 .ia_gid = current_fsgid(), }; 3990 3991 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 3992 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 3993 return 0; 3994 3995 return kernfs_setattr(kn, &iattr); 3996 } 3997 3998 static void cgroup_file_notify_timer(struct timer_list *timer) 3999 { 4000 cgroup_file_notify(container_of(timer, struct cgroup_file, 4001 notify_timer)); 4002 } 4003 4004 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, 4005 struct cftype *cft) 4006 { 4007 char name[CGROUP_FILE_NAME_MAX]; 4008 struct kernfs_node *kn; 4009 struct lock_class_key *key = NULL; 4010 int ret; 4011 4012 #ifdef CONFIG_DEBUG_LOCK_ALLOC 4013 key = &cft->lockdep_key; 4014 #endif 4015 kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), 4016 cgroup_file_mode(cft), 4017 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 4018 0, cft->kf_ops, cft, 4019 NULL, key); 4020 if (IS_ERR(kn)) 4021 return PTR_ERR(kn); 4022 4023 ret = cgroup_kn_set_ugid(kn); 4024 if (ret) { 4025 kernfs_remove(kn); 4026 return ret; 4027 } 4028 4029 if 
(cft->file_offset) { 4030 struct cgroup_file *cfile = (void *)css + cft->file_offset; 4031 4032 timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0); 4033 4034 spin_lock_irq(&cgroup_file_kn_lock); 4035 cfile->kn = kn; 4036 spin_unlock_irq(&cgroup_file_kn_lock); 4037 } 4038 4039 return 0; 4040 } 4041 4042 /** 4043 * cgroup_addrm_files - add or remove files to a cgroup directory 4044 * @css: the target css 4045 * @cgrp: the target cgroup (usually css->cgroup) 4046 * @cfts: array of cftypes to be added 4047 * @is_add: whether to add or remove 4048 * 4049 * Depending on @is_add, add or remove files defined by @cfts on @cgrp. 4050 * For removals, this function never fails. 4051 */ 4052 static int cgroup_addrm_files(struct cgroup_subsys_state *css, 4053 struct cgroup *cgrp, struct cftype cfts[], 4054 bool is_add) 4055 { 4056 struct cftype *cft, *cft_end = NULL; 4057 int ret = 0; 4058 4059 lockdep_assert_held(&cgroup_mutex); 4060 4061 restart: 4062 for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) { 4063 /* does cft->flags tell us to skip this file on @cgrp? */ 4064 if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) 4065 continue; 4066 if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) 4067 continue; 4068 if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) 4069 continue; 4070 if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) 4071 continue; 4072 if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) 4073 continue; 4074 if ((cft->flags & CFTYPE_DEBUG) && !cgroup_debug) 4075 continue; 4076 if (is_add) { 4077 ret = cgroup_add_file(css, cgrp, cft); 4078 if (ret) { 4079 pr_warn("%s: failed to add %s, err=%d\n", 4080 __func__, cft->name, ret); 4081 cft_end = cft; 4082 is_add = false; 4083 goto restart; 4084 } 4085 } else { 4086 cgroup_rm_file(cgrp, cft); 4087 } 4088 } 4089 return ret; 4090 } 4091 4092 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) 4093 { 4094 struct cgroup_subsys *ss = cfts[0].ss; 4095 struct cgroup *root = &ss->root->cgrp; 4096 struct cgroup_subsys_state *css; 4097 int ret = 0; 4098 4099 lockdep_assert_held(&cgroup_mutex); 4100 4101 /* add/rm files for all cgroups created before */ 4102 css_for_each_descendant_pre(css, cgroup_css(root, ss)) { 4103 struct cgroup *cgrp = css->cgroup; 4104 4105 if (!(css->flags & CSS_VISIBLE)) 4106 continue; 4107 4108 ret = cgroup_addrm_files(css, cgrp, cfts, is_add); 4109 if (ret) 4110 break; 4111 } 4112 4113 if (is_add && !ret) 4114 kernfs_activate(root->kn); 4115 return ret; 4116 } 4117 4118 static void cgroup_exit_cftypes(struct cftype *cfts) 4119 { 4120 struct cftype *cft; 4121 4122 for (cft = cfts; cft->name[0] != '\0'; cft++) { 4123 /* free copy for custom atomic_write_len, see init_cftypes() */ 4124 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) 4125 kfree(cft->kf_ops); 4126 cft->kf_ops = NULL; 4127 cft->ss = NULL; 4128 4129 /* revert flags set by cgroup core while adding @cfts */ 4130 cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL); 4131 } 4132 } 4133 4134 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4135 { 4136 struct cftype *cft; 4137 4138 for (cft = cfts; cft->name[0] != '\0'; cft++) { 4139 struct kernfs_ops *kf_ops; 4140 4141 WARN_ON(cft->ss || cft->kf_ops); 4142 4143 if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled()) 4144 continue; 4145 4146 if (cft->seq_start) 4147 kf_ops = &cgroup_kf_ops; 4148 else 4149 kf_ops = &cgroup_kf_single_ops; 4150 4151 /* 4152 * Ugh... 
if @cft wants a custom max_write_len, we need to 4153 * make a copy of kf_ops to set its atomic_write_len. 4154 */ 4155 if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { 4156 kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); 4157 if (!kf_ops) { 4158 cgroup_exit_cftypes(cfts); 4159 return -ENOMEM; 4160 } 4161 kf_ops->atomic_write_len = cft->max_write_len; 4162 } 4163 4164 cft->kf_ops = kf_ops; 4165 cft->ss = ss; 4166 } 4167 4168 return 0; 4169 } 4170 4171 static int cgroup_rm_cftypes_locked(struct cftype *cfts) 4172 { 4173 lockdep_assert_held(&cgroup_mutex); 4174 4175 if (!cfts || !cfts[0].ss) 4176 return -ENOENT; 4177 4178 list_del(&cfts->node); 4179 cgroup_apply_cftypes(cfts, false); 4180 cgroup_exit_cftypes(cfts); 4181 return 0; 4182 } 4183 4184 /** 4185 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem 4186 * @cfts: zero-length name terminated array of cftypes 4187 * 4188 * Unregister @cfts. Files described by @cfts are removed from all 4189 * existing cgroups and all future cgroups won't have them either. This 4190 * function can be called anytime whether @cfts' subsys is attached or not. 4191 * 4192 * Returns 0 on successful unregistration, -ENOENT if @cfts is not 4193 * registered. 4194 */ 4195 int cgroup_rm_cftypes(struct cftype *cfts) 4196 { 4197 int ret; 4198 4199 mutex_lock(&cgroup_mutex); 4200 ret = cgroup_rm_cftypes_locked(cfts); 4201 mutex_unlock(&cgroup_mutex); 4202 return ret; 4203 } 4204 4205 /** 4206 * cgroup_add_cftypes - add an array of cftypes to a subsystem 4207 * @ss: target cgroup subsystem 4208 * @cfts: zero-length name terminated array of cftypes 4209 * 4210 * Register @cfts to @ss. Files described by @cfts are created for all 4211 * existing cgroups to which @ss is attached and all future cgroups will 4212 * have them too. This function can be called anytime whether @ss is 4213 * attached or not. 4214 * 4215 * Returns 0 on successful registration, -errno on failure. Note that this 4216 * function currently returns 0 as long as @cfts registration is successful 4217 * even if some file creation attempts on existing cgroups fail. 4218 */ 4219 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4220 { 4221 int ret; 4222 4223 if (!cgroup_ssid_enabled(ss->id)) 4224 return 0; 4225 4226 if (!cfts || cfts[0].name[0] == '\0') 4227 return 0; 4228 4229 ret = cgroup_init_cftypes(ss, cfts); 4230 if (ret) 4231 return ret; 4232 4233 mutex_lock(&cgroup_mutex); 4234 4235 list_add_tail(&cfts->node, &ss->cfts); 4236 ret = cgroup_apply_cftypes(cfts, true); 4237 if (ret) 4238 cgroup_rm_cftypes_locked(cfts); 4239 4240 mutex_unlock(&cgroup_mutex); 4241 return ret; 4242 } 4243 4244 /** 4245 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy 4246 * @ss: target cgroup subsystem 4247 * @cfts: zero-length name terminated array of cftypes 4248 * 4249 * Similar to cgroup_add_cftypes() but the added files are only used for 4250 * the default hierarchy. 
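 *
 * Illustrative sketch only (the ex_* names below are made up and not
 * part of this file): a controller typically registers a statically
 * defined array terminated by an entry with an empty name,
 *
 *	static struct cftype ex_dfl_files[] = {
 *		{
 *			.name = "ex.stat",
 *			.seq_show = ex_stat_show,
 *		},
 *		{ }	(terminating entry)
 *	};
 *
 * and then calls cgroup_add_dfl_cftypes(&ex_cgrp_subsys, ex_dfl_files).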
4251 */ 4252 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4253 { 4254 struct cftype *cft; 4255 4256 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 4257 cft->flags |= __CFTYPE_ONLY_ON_DFL; 4258 return cgroup_add_cftypes(ss, cfts); 4259 } 4260 4261 /** 4262 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies 4263 * @ss: target cgroup subsystem 4264 * @cfts: zero-length name terminated array of cftypes 4265 * 4266 * Similar to cgroup_add_cftypes() but the added files are only used for 4267 * the legacy hierarchies. 4268 */ 4269 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) 4270 { 4271 struct cftype *cft; 4272 4273 for (cft = cfts; cft && cft->name[0] != '\0'; cft++) 4274 cft->flags |= __CFTYPE_NOT_ON_DFL; 4275 return cgroup_add_cftypes(ss, cfts); 4276 } 4277 4278 /** 4279 * cgroup_file_notify - generate a file modified event for a cgroup_file 4280 * @cfile: target cgroup_file 4281 * 4282 * @cfile must have been obtained by setting cftype->file_offset. 4283 */ 4284 void cgroup_file_notify(struct cgroup_file *cfile) 4285 { 4286 unsigned long flags; 4287 4288 spin_lock_irqsave(&cgroup_file_kn_lock, flags); 4289 if (cfile->kn) { 4290 unsigned long last = cfile->notified_at; 4291 unsigned long next = last + CGROUP_FILE_NOTIFY_MIN_INTV; 4292 4293 if (time_in_range(jiffies, last, next)) { 4294 timer_reduce(&cfile->notify_timer, next); 4295 } else { 4296 kernfs_notify(cfile->kn); 4297 cfile->notified_at = jiffies; 4298 } 4299 } 4300 spin_unlock_irqrestore(&cgroup_file_kn_lock, flags); 4301 } 4302 4303 /** 4304 * css_next_child - find the next child of a given css 4305 * @pos: the current position (%NULL to initiate traversal) 4306 * @parent: css whose children to walk 4307 * 4308 * This function returns the next child of @parent and should be called 4309 * under either cgroup_mutex or RCU read lock. The only requirement is 4310 * that @parent and @pos are accessible. The next sibling is guaranteed to 4311 * be returned regardless of their states. 4312 * 4313 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4314 * css which finished ->css_online() is guaranteed to be visible in the 4315 * future iterations and will stay visible until the last reference is put. 4316 * A css which hasn't finished ->css_online() or already finished 4317 * ->css_offline() may show up during traversal. It's each subsystem's 4318 * responsibility to synchronize against on/offlining. 4319 */ 4320 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, 4321 struct cgroup_subsys_state *parent) 4322 { 4323 struct cgroup_subsys_state *next; 4324 4325 cgroup_assert_mutex_or_rcu_locked(); 4326 4327 /* 4328 * @pos could already have been unlinked from the sibling list. 4329 * Once a cgroup is removed, its ->sibling.next is no longer 4330 * updated when its next sibling changes. CSS_RELEASED is set when 4331 * @pos is taken off list, at which time its next pointer is valid, 4332 * and, as releases are serialized, the one pointed to by the next 4333 * pointer is guaranteed to not have started release yet. This 4334 * implies that if we observe !CSS_RELEASED on @pos in this RCU 4335 * critical section, the one pointed to by its next pointer is 4336 * guaranteed to not have finished its RCU grace period even if we 4337 * have dropped rcu_read_lock() in-between iterations. 
4338 * 4339 * If @pos has CSS_RELEASED set, its next pointer can't be 4340 * dereferenced; however, as each css is given a monotonically 4341 * increasing unique serial number and always appended to the 4342 * sibling list, the next one can be found by walking the parent's 4343 * children until the first css with higher serial number than 4344 * @pos's. While this path can be slower, it happens iff iteration 4345 * races against release and the race window is very small. 4346 */ 4347 if (!pos) { 4348 next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling); 4349 } else if (likely(!(pos->flags & CSS_RELEASED))) { 4350 next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling); 4351 } else { 4352 list_for_each_entry_rcu(next, &parent->children, sibling, 4353 lockdep_is_held(&cgroup_mutex)) 4354 if (next->serial_nr > pos->serial_nr) 4355 break; 4356 } 4357 4358 /* 4359 * @next, if not pointing to the head, can be dereferenced and is 4360 * the next sibling. 4361 */ 4362 if (&next->sibling != &parent->children) 4363 return next; 4364 return NULL; 4365 } 4366 4367 /** 4368 * css_next_descendant_pre - find the next descendant for pre-order walk 4369 * @pos: the current position (%NULL to initiate traversal) 4370 * @root: css whose descendants to walk 4371 * 4372 * To be used by css_for_each_descendant_pre(). Find the next descendant 4373 * to visit for pre-order traversal of @root's descendants. @root is 4374 * included in the iteration and the first node to be visited. 4375 * 4376 * While this function requires cgroup_mutex or RCU read locking, it 4377 * doesn't require the whole traversal to be contained in a single critical 4378 * section. This function will return the correct next descendant as long 4379 * as both @pos and @root are accessible and @pos is a descendant of @root. 4380 * 4381 * If a subsystem synchronizes ->css_online() and the start of iteration, a 4382 * css which finished ->css_online() is guaranteed to be visible in the 4383 * future iterations and will stay visible until the last reference is put. 4384 * A css which hasn't finished ->css_online() or already finished 4385 * ->css_offline() may show up during traversal. It's each subsystem's 4386 * responsibility to synchronize against on/offlining. 4387 */ 4388 struct cgroup_subsys_state * 4389 css_next_descendant_pre(struct cgroup_subsys_state *pos, 4390 struct cgroup_subsys_state *root) 4391 { 4392 struct cgroup_subsys_state *next; 4393 4394 cgroup_assert_mutex_or_rcu_locked(); 4395 4396 /* if first iteration, visit @root */ 4397 if (!pos) 4398 return root; 4399 4400 /* visit the first child if exists */ 4401 next = css_next_child(NULL, pos); 4402 if (next) 4403 return next; 4404 4405 /* no child, visit my or the closest ancestor's next sibling */ 4406 while (pos != root) { 4407 next = css_next_child(pos, pos->parent); 4408 if (next) 4409 return next; 4410 pos = pos->parent; 4411 } 4412 4413 return NULL; 4414 } 4415 EXPORT_SYMBOL_GPL(css_next_descendant_pre); 4416 4417 /** 4418 * css_rightmost_descendant - return the rightmost descendant of a css 4419 * @pos: css of interest 4420 * 4421 * Return the rightmost descendant of @pos. If there's no descendant, @pos 4422 * is returned. This can be used during pre-order traversal to skip 4423 * subtree of @pos. 4424 * 4425 * While this function requires cgroup_mutex or RCU read locking, it 4426 * doesn't require the whole traversal to be contained in a single critical 4427 * section. 
This function will return the correct rightmost descendant as
 * long as @pos is accessible.
 */
struct cgroup_subsys_state *
css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last, *tmp;

	cgroup_assert_mutex_or_rcu_locked();

	do {
		last = pos;
		/* ->prev isn't RCU safe, walk ->next till the end */
		pos = NULL;
		css_for_each_child(tmp, last)
			pos = tmp;
	} while (pos);

	return last;
}

static struct cgroup_subsys_state *
css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
	struct cgroup_subsys_state *last;

	do {
		last = pos;
		pos = css_next_child(NULL, pos);
	} while (pos);

	return last;
}

/**
 * css_next_descendant_post - find the next descendant for post-order walk
 * @pos: the current position (%NULL to initiate traversal)
 * @root: css whose descendants to walk
 *
 * To be used by css_for_each_descendant_post().  Find the next descendant
 * to visit for post-order traversal of @root's descendants.  @root is
 * included in the iteration and the last node to be visited.
 *
 * While this function requires cgroup_mutex or RCU read locking, it
 * doesn't require the whole traversal to be contained in a single critical
 * section.  This function will return the correct next descendant as long
 * as both @pos and @root are accessible and @pos is a descendant of
 * @root.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 */
struct cgroup_subsys_state *
css_next_descendant_post(struct cgroup_subsys_state *pos,
			 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *next;

	cgroup_assert_mutex_or_rcu_locked();

	/* if first iteration, visit leftmost descendant which may be @root */
	if (!pos)
		return css_leftmost_descendant(root);

	/* if we visited @root, we're done */
	if (pos == root)
		return NULL;

	/* if there's an unvisited sibling, visit its leftmost descendant */
	next = css_next_child(pos, pos->parent);
	if (next)
		return css_leftmost_descendant(next);

	/* no sibling left, visit parent */
	return pos->parent;
}

/**
 * css_has_online_children - does a css have online children
 * @css: the target css
 *
 * Returns %true if @css has any online children; otherwise, %false.  This
 * function can be called from any context but the caller is responsible
 * for synchronizing against on/offlining as necessary.
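 *
 * A sketch of typical use, as in cgroup_destroy_locked() below:
 *
 *	if (css_has_online_children(&cgrp->self))
 *		return -EBUSY;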
4515 */ 4516 bool css_has_online_children(struct cgroup_subsys_state *css) 4517 { 4518 struct cgroup_subsys_state *child; 4519 bool ret = false; 4520 4521 rcu_read_lock(); 4522 css_for_each_child(child, css) { 4523 if (child->flags & CSS_ONLINE) { 4524 ret = true; 4525 break; 4526 } 4527 } 4528 rcu_read_unlock(); 4529 return ret; 4530 } 4531 4532 static struct css_set *css_task_iter_next_css_set(struct css_task_iter *it) 4533 { 4534 struct list_head *l; 4535 struct cgrp_cset_link *link; 4536 struct css_set *cset; 4537 4538 lockdep_assert_held(&css_set_lock); 4539 4540 /* find the next threaded cset */ 4541 if (it->tcset_pos) { 4542 l = it->tcset_pos->next; 4543 4544 if (l != it->tcset_head) { 4545 it->tcset_pos = l; 4546 return container_of(l, struct css_set, 4547 threaded_csets_node); 4548 } 4549 4550 it->tcset_pos = NULL; 4551 } 4552 4553 /* find the next cset */ 4554 l = it->cset_pos; 4555 l = l->next; 4556 if (l == it->cset_head) { 4557 it->cset_pos = NULL; 4558 return NULL; 4559 } 4560 4561 if (it->ss) { 4562 cset = container_of(l, struct css_set, e_cset_node[it->ss->id]); 4563 } else { 4564 link = list_entry(l, struct cgrp_cset_link, cset_link); 4565 cset = link->cset; 4566 } 4567 4568 it->cset_pos = l; 4569 4570 /* initialize threaded css_set walking */ 4571 if (it->flags & CSS_TASK_ITER_THREADED) { 4572 if (it->cur_dcset) 4573 put_css_set_locked(it->cur_dcset); 4574 it->cur_dcset = cset; 4575 get_css_set(cset); 4576 4577 it->tcset_head = &cset->threaded_csets; 4578 it->tcset_pos = &cset->threaded_csets; 4579 } 4580 4581 return cset; 4582 } 4583 4584 /** 4585 * css_task_iter_advance_css_set - advance a task iterator to the next css_set 4586 * @it: the iterator to advance 4587 * 4588 * Advance @it to the next css_set to walk. 4589 */ 4590 static void css_task_iter_advance_css_set(struct css_task_iter *it) 4591 { 4592 struct css_set *cset; 4593 4594 lockdep_assert_held(&css_set_lock); 4595 4596 /* Advance to the next non-empty css_set and find first non-empty tasks list*/ 4597 while ((cset = css_task_iter_next_css_set(it))) { 4598 if (!list_empty(&cset->tasks)) { 4599 it->cur_tasks_head = &cset->tasks; 4600 break; 4601 } else if (!list_empty(&cset->mg_tasks)) { 4602 it->cur_tasks_head = &cset->mg_tasks; 4603 break; 4604 } else if (!list_empty(&cset->dying_tasks)) { 4605 it->cur_tasks_head = &cset->dying_tasks; 4606 break; 4607 } 4608 } 4609 if (!cset) { 4610 it->task_pos = NULL; 4611 return; 4612 } 4613 it->task_pos = it->cur_tasks_head->next; 4614 4615 /* 4616 * We don't keep css_sets locked across iteration steps and thus 4617 * need to take steps to ensure that iteration can be resumed after 4618 * the lock is re-acquired. Iteration is performed at two levels - 4619 * css_sets and tasks in them. 4620 * 4621 * Once created, a css_set never leaves its cgroup lists, so a 4622 * pinned css_set is guaranteed to stay put and we can resume 4623 * iteration afterwards. 4624 * 4625 * Tasks may leave @cset across iteration steps. This is resolved 4626 * by registering each iterator with the css_set currently being 4627 * walked and making css_set_move_task() advance iterators whose 4628 * next task is leaving. 
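	 * (css_task_iter_skip() and css_task_iter_advance() below
	 * implement that advancing.)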
4629 */ 4630 if (it->cur_cset) { 4631 list_del(&it->iters_node); 4632 put_css_set_locked(it->cur_cset); 4633 } 4634 get_css_set(cset); 4635 it->cur_cset = cset; 4636 list_add(&it->iters_node, &cset->task_iters); 4637 } 4638 4639 static void css_task_iter_skip(struct css_task_iter *it, 4640 struct task_struct *task) 4641 { 4642 lockdep_assert_held(&css_set_lock); 4643 4644 if (it->task_pos == &task->cg_list) { 4645 it->task_pos = it->task_pos->next; 4646 it->flags |= CSS_TASK_ITER_SKIPPED; 4647 } 4648 } 4649 4650 static void css_task_iter_advance(struct css_task_iter *it) 4651 { 4652 struct task_struct *task; 4653 4654 lockdep_assert_held(&css_set_lock); 4655 repeat: 4656 if (it->task_pos) { 4657 /* 4658 * Advance iterator to find next entry. We go through cset 4659 * tasks, mg_tasks and dying_tasks, when consumed we move onto 4660 * the next cset. 4661 */ 4662 if (it->flags & CSS_TASK_ITER_SKIPPED) 4663 it->flags &= ~CSS_TASK_ITER_SKIPPED; 4664 else 4665 it->task_pos = it->task_pos->next; 4666 4667 if (it->task_pos == &it->cur_cset->tasks) { 4668 it->cur_tasks_head = &it->cur_cset->mg_tasks; 4669 it->task_pos = it->cur_tasks_head->next; 4670 } 4671 if (it->task_pos == &it->cur_cset->mg_tasks) { 4672 it->cur_tasks_head = &it->cur_cset->dying_tasks; 4673 it->task_pos = it->cur_tasks_head->next; 4674 } 4675 if (it->task_pos == &it->cur_cset->dying_tasks) 4676 css_task_iter_advance_css_set(it); 4677 } else { 4678 /* called from start, proceed to the first cset */ 4679 css_task_iter_advance_css_set(it); 4680 } 4681 4682 if (!it->task_pos) 4683 return; 4684 4685 task = list_entry(it->task_pos, struct task_struct, cg_list); 4686 4687 if (it->flags & CSS_TASK_ITER_PROCS) { 4688 /* if PROCS, skip over tasks which aren't group leaders */ 4689 if (!thread_group_leader(task)) 4690 goto repeat; 4691 4692 /* and dying leaders w/o live member threads */ 4693 if (it->cur_tasks_head == &it->cur_cset->dying_tasks && 4694 !atomic_read(&task->signal->live)) 4695 goto repeat; 4696 } else { 4697 /* skip all dying ones */ 4698 if (it->cur_tasks_head == &it->cur_cset->dying_tasks) 4699 goto repeat; 4700 } 4701 } 4702 4703 /** 4704 * css_task_iter_start - initiate task iteration 4705 * @css: the css to walk tasks of 4706 * @flags: CSS_TASK_ITER_* flags 4707 * @it: the task iterator to use 4708 * 4709 * Initiate iteration through the tasks of @css. The caller can call 4710 * css_task_iter_next() to walk through the tasks until the function 4711 * returns NULL. On completion of iteration, css_task_iter_end() must be 4712 * called. 4713 */ 4714 void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, 4715 struct css_task_iter *it) 4716 { 4717 memset(it, 0, sizeof(*it)); 4718 4719 spin_lock_irq(&css_set_lock); 4720 4721 it->ss = css->ss; 4722 it->flags = flags; 4723 4724 if (CGROUP_HAS_SUBSYS_CONFIG && it->ss) 4725 it->cset_pos = &css->cgroup->e_csets[css->ss->id]; 4726 else 4727 it->cset_pos = &css->cgroup->cset_links; 4728 4729 it->cset_head = it->cset_pos; 4730 4731 css_task_iter_advance(it); 4732 4733 spin_unlock_irq(&css_set_lock); 4734 } 4735 4736 /** 4737 * css_task_iter_next - return the next task for the iterator 4738 * @it: the task iterator being iterated 4739 * 4740 * The "next" function for task iteration. @it should have been 4741 * initialized via css_task_iter_start(). Returns NULL when the iteration 4742 * reaches the end. 
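 *
 * Typical usage is a sketch along these lines, mirroring callers such
 * as __cgroup_kill() above (do_something() is a placeholder):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		do_something(task);
 *	css_task_iter_end(&it);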
4743 */ 4744 struct task_struct *css_task_iter_next(struct css_task_iter *it) 4745 { 4746 if (it->cur_task) { 4747 put_task_struct(it->cur_task); 4748 it->cur_task = NULL; 4749 } 4750 4751 spin_lock_irq(&css_set_lock); 4752 4753 /* @it may be half-advanced by skips, finish advancing */ 4754 if (it->flags & CSS_TASK_ITER_SKIPPED) 4755 css_task_iter_advance(it); 4756 4757 if (it->task_pos) { 4758 it->cur_task = list_entry(it->task_pos, struct task_struct, 4759 cg_list); 4760 get_task_struct(it->cur_task); 4761 css_task_iter_advance(it); 4762 } 4763 4764 spin_unlock_irq(&css_set_lock); 4765 4766 return it->cur_task; 4767 } 4768 4769 /** 4770 * css_task_iter_end - finish task iteration 4771 * @it: the task iterator to finish 4772 * 4773 * Finish task iteration started by css_task_iter_start(). 4774 */ 4775 void css_task_iter_end(struct css_task_iter *it) 4776 { 4777 if (it->cur_cset) { 4778 spin_lock_irq(&css_set_lock); 4779 list_del(&it->iters_node); 4780 put_css_set_locked(it->cur_cset); 4781 spin_unlock_irq(&css_set_lock); 4782 } 4783 4784 if (it->cur_dcset) 4785 put_css_set(it->cur_dcset); 4786 4787 if (it->cur_task) 4788 put_task_struct(it->cur_task); 4789 } 4790 4791 static void cgroup_procs_release(struct kernfs_open_file *of) 4792 { 4793 struct cgroup_file_ctx *ctx = of->priv; 4794 4795 if (ctx->procs.started) 4796 css_task_iter_end(&ctx->procs.iter); 4797 } 4798 4799 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos) 4800 { 4801 struct kernfs_open_file *of = s->private; 4802 struct cgroup_file_ctx *ctx = of->priv; 4803 4804 if (pos) 4805 (*pos)++; 4806 4807 return css_task_iter_next(&ctx->procs.iter); 4808 } 4809 4810 static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos, 4811 unsigned int iter_flags) 4812 { 4813 struct kernfs_open_file *of = s->private; 4814 struct cgroup *cgrp = seq_css(s)->cgroup; 4815 struct cgroup_file_ctx *ctx = of->priv; 4816 struct css_task_iter *it = &ctx->procs.iter; 4817 4818 /* 4819 * When a seq_file is seeked, it's always traversed sequentially 4820 * from position 0, so we can simply keep iterating on !0 *pos. 4821 */ 4822 if (!ctx->procs.started) { 4823 if (WARN_ON_ONCE((*pos))) 4824 return ERR_PTR(-EINVAL); 4825 css_task_iter_start(&cgrp->self, iter_flags, it); 4826 ctx->procs.started = true; 4827 } else if (!(*pos)) { 4828 css_task_iter_end(it); 4829 css_task_iter_start(&cgrp->self, iter_flags, it); 4830 } else 4831 return it->cur_task; 4832 4833 return cgroup_procs_next(s, NULL, NULL); 4834 } 4835 4836 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos) 4837 { 4838 struct cgroup *cgrp = seq_css(s)->cgroup; 4839 4840 /* 4841 * All processes of a threaded subtree belong to the domain cgroup 4842 * of the subtree. Only threads can be distributed across the 4843 * subtree. Reject reads on cgroup.procs in the subtree proper. 4844 * They're always empty anyway. 
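	 * Threads in the subtree remain listable through cgroup.threads,
	 * whose start callback passes no iterator flags (see
	 * cgroup_threads_start() below).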
4845 */ 4846 if (cgroup_is_threaded(cgrp)) 4847 return ERR_PTR(-EOPNOTSUPP); 4848 4849 return __cgroup_procs_start(s, pos, CSS_TASK_ITER_PROCS | 4850 CSS_TASK_ITER_THREADED); 4851 } 4852 4853 static int cgroup_procs_show(struct seq_file *s, void *v) 4854 { 4855 seq_printf(s, "%d\n", task_pid_vnr(v)); 4856 return 0; 4857 } 4858 4859 static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb) 4860 { 4861 int ret; 4862 struct inode *inode; 4863 4864 lockdep_assert_held(&cgroup_mutex); 4865 4866 inode = kernfs_get_inode(sb, cgrp->procs_file.kn); 4867 if (!inode) 4868 return -ENOMEM; 4869 4870 ret = inode_permission(&init_user_ns, inode, MAY_WRITE); 4871 iput(inode); 4872 return ret; 4873 } 4874 4875 static int cgroup_procs_write_permission(struct cgroup *src_cgrp, 4876 struct cgroup *dst_cgrp, 4877 struct super_block *sb, 4878 struct cgroup_namespace *ns) 4879 { 4880 struct cgroup *com_cgrp = src_cgrp; 4881 int ret; 4882 4883 lockdep_assert_held(&cgroup_mutex); 4884 4885 /* find the common ancestor */ 4886 while (!cgroup_is_descendant(dst_cgrp, com_cgrp)) 4887 com_cgrp = cgroup_parent(com_cgrp); 4888 4889 /* %current should be authorized to migrate to the common ancestor */ 4890 ret = cgroup_may_write(com_cgrp, sb); 4891 if (ret) 4892 return ret; 4893 4894 /* 4895 * If namespaces are delegation boundaries, %current must be able 4896 * to see both source and destination cgroups from its namespace. 4897 */ 4898 if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) && 4899 (!cgroup_is_descendant(src_cgrp, ns->root_cset->dfl_cgrp) || 4900 !cgroup_is_descendant(dst_cgrp, ns->root_cset->dfl_cgrp))) 4901 return -ENOENT; 4902 4903 return 0; 4904 } 4905 4906 static int cgroup_attach_permissions(struct cgroup *src_cgrp, 4907 struct cgroup *dst_cgrp, 4908 struct super_block *sb, bool threadgroup, 4909 struct cgroup_namespace *ns) 4910 { 4911 int ret = 0; 4912 4913 ret = cgroup_procs_write_permission(src_cgrp, dst_cgrp, sb, ns); 4914 if (ret) 4915 return ret; 4916 4917 ret = cgroup_migrate_vet_dst(dst_cgrp); 4918 if (ret) 4919 return ret; 4920 4921 if (!threadgroup && (src_cgrp->dom_cgrp != dst_cgrp->dom_cgrp)) 4922 ret = -EOPNOTSUPP; 4923 4924 return ret; 4925 } 4926 4927 static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, 4928 bool threadgroup) 4929 { 4930 struct cgroup_file_ctx *ctx = of->priv; 4931 struct cgroup *src_cgrp, *dst_cgrp; 4932 struct task_struct *task; 4933 const struct cred *saved_cred; 4934 ssize_t ret; 4935 bool locked; 4936 4937 dst_cgrp = cgroup_kn_lock_live(of->kn, false); 4938 if (!dst_cgrp) 4939 return -ENODEV; 4940 4941 task = cgroup_procs_write_start(buf, threadgroup, &locked); 4942 ret = PTR_ERR_OR_ZERO(task); 4943 if (ret) 4944 goto out_unlock; 4945 4946 /* find the source cgroup */ 4947 spin_lock_irq(&css_set_lock); 4948 src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); 4949 spin_unlock_irq(&css_set_lock); 4950 4951 /* 4952 * Process and thread migrations follow same delegation rule. Check 4953 * permissions using the credentials from file open to protect against 4954 * inherited fd attacks. 
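	 * (e.g. an fd opened by an unprivileged task and later handed to
	 * a privileged writer is still checked against the opener's
	 * credentials).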
4955 */ 4956 saved_cred = override_creds(of->file->f_cred); 4957 ret = cgroup_attach_permissions(src_cgrp, dst_cgrp, 4958 of->file->f_path.dentry->d_sb, 4959 threadgroup, ctx->ns); 4960 revert_creds(saved_cred); 4961 if (ret) 4962 goto out_finish; 4963 4964 ret = cgroup_attach_task(dst_cgrp, task, threadgroup); 4965 4966 out_finish: 4967 cgroup_procs_write_finish(task, locked); 4968 out_unlock: 4969 cgroup_kn_unlock(of->kn); 4970 4971 return ret; 4972 } 4973 4974 static ssize_t cgroup_procs_write(struct kernfs_open_file *of, 4975 char *buf, size_t nbytes, loff_t off) 4976 { 4977 return __cgroup_procs_write(of, buf, true) ?: nbytes; 4978 } 4979 4980 static void *cgroup_threads_start(struct seq_file *s, loff_t *pos) 4981 { 4982 return __cgroup_procs_start(s, pos, 0); 4983 } 4984 4985 static ssize_t cgroup_threads_write(struct kernfs_open_file *of, 4986 char *buf, size_t nbytes, loff_t off) 4987 { 4988 return __cgroup_procs_write(of, buf, false) ?: nbytes; 4989 } 4990 4991 /* cgroup core interface files for the default hierarchy */ 4992 static struct cftype cgroup_base_files[] = { 4993 { 4994 .name = "cgroup.type", 4995 .flags = CFTYPE_NOT_ON_ROOT, 4996 .seq_show = cgroup_type_show, 4997 .write = cgroup_type_write, 4998 }, 4999 { 5000 .name = "cgroup.procs", 5001 .flags = CFTYPE_NS_DELEGATABLE, 5002 .file_offset = offsetof(struct cgroup, procs_file), 5003 .release = cgroup_procs_release, 5004 .seq_start = cgroup_procs_start, 5005 .seq_next = cgroup_procs_next, 5006 .seq_show = cgroup_procs_show, 5007 .write = cgroup_procs_write, 5008 }, 5009 { 5010 .name = "cgroup.threads", 5011 .flags = CFTYPE_NS_DELEGATABLE, 5012 .release = cgroup_procs_release, 5013 .seq_start = cgroup_threads_start, 5014 .seq_next = cgroup_procs_next, 5015 .seq_show = cgroup_procs_show, 5016 .write = cgroup_threads_write, 5017 }, 5018 { 5019 .name = "cgroup.controllers", 5020 .seq_show = cgroup_controllers_show, 5021 }, 5022 { 5023 .name = "cgroup.subtree_control", 5024 .flags = CFTYPE_NS_DELEGATABLE, 5025 .seq_show = cgroup_subtree_control_show, 5026 .write = cgroup_subtree_control_write, 5027 }, 5028 { 5029 .name = "cgroup.events", 5030 .flags = CFTYPE_NOT_ON_ROOT, 5031 .file_offset = offsetof(struct cgroup, events_file), 5032 .seq_show = cgroup_events_show, 5033 }, 5034 { 5035 .name = "cgroup.max.descendants", 5036 .seq_show = cgroup_max_descendants_show, 5037 .write = cgroup_max_descendants_write, 5038 }, 5039 { 5040 .name = "cgroup.max.depth", 5041 .seq_show = cgroup_max_depth_show, 5042 .write = cgroup_max_depth_write, 5043 }, 5044 { 5045 .name = "cgroup.stat", 5046 .seq_show = cgroup_stat_show, 5047 }, 5048 { 5049 .name = "cgroup.freeze", 5050 .flags = CFTYPE_NOT_ON_ROOT, 5051 .seq_show = cgroup_freeze_show, 5052 .write = cgroup_freeze_write, 5053 }, 5054 { 5055 .name = "cgroup.kill", 5056 .flags = CFTYPE_NOT_ON_ROOT, 5057 .write = cgroup_kill_write, 5058 }, 5059 { 5060 .name = "cpu.stat", 5061 .seq_show = cpu_stat_show, 5062 }, 5063 #ifdef CONFIG_PSI 5064 { 5065 .name = "io.pressure", 5066 .flags = CFTYPE_PRESSURE, 5067 .seq_show = cgroup_io_pressure_show, 5068 .write = cgroup_io_pressure_write, 5069 .poll = cgroup_pressure_poll, 5070 .release = cgroup_pressure_release, 5071 }, 5072 { 5073 .name = "memory.pressure", 5074 .flags = CFTYPE_PRESSURE, 5075 .seq_show = cgroup_memory_pressure_show, 5076 .write = cgroup_memory_pressure_write, 5077 .poll = cgroup_pressure_poll, 5078 .release = cgroup_pressure_release, 5079 }, 5080 { 5081 .name = "cpu.pressure", 5082 .flags = CFTYPE_PRESSURE, 5083 .seq_show = 
cgroup_cpu_pressure_show,
		.write = cgroup_cpu_pressure_write,
		.poll = cgroup_pressure_poll,
		.release = cgroup_pressure_release,
	},
#endif /* CONFIG_PSI */
	{ }	/* terminate */
};

/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget_online() is guaranteed to fail, the css can be
 *    offlined by invoking offline_css().  After offlining, the base ref is
 *    put.  Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_rwork_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process
 * context and thus involve punting to css->destroy_work, adding two
 * additional steps to the already complex sequence.
 */
static void css_free_rwork_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css = container_of(to_rcu_work(work),
				struct cgroup_subsys_state, destroy_rwork);
	struct cgroup_subsys *ss = css->ss;
	struct cgroup *cgrp = css->cgroup;

	percpu_ref_exit(&css->refcnt);

	if (ss) {
		/* css free path */
		struct cgroup_subsys_state *parent = css->parent;
		int id = css->id;

		ss->css_free(css);
		cgroup_idr_remove(&ss->css_idr, id);
		cgroup_put(cgrp);

		if (parent)
			css_put(parent);
	} else {
		/* cgroup free path */
		atomic_dec(&cgrp->root->nr_cgrps);
		cgroup1_pidlist_destroy_all(cgrp);
		cancel_work_sync(&cgrp->release_agent_work);

		if (cgroup_parent(cgrp)) {
			/*
			 * We get a ref to the parent, and put the ref when
			 * this cgroup is being freed, so it's guaranteed
			 * that the parent won't be destroyed before its
			 * children.
			 */
			cgroup_put(cgroup_parent(cgrp));
			kernfs_put(cgrp->kn);
			psi_cgroup_free(cgrp);
			cgroup_rstat_exit(cgrp);
			kfree(cgrp);
		} else {
			/*
			 * This is the root cgroup's refcnt reaching zero,
			 * which indicates that the root should be
			 * released.
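			 * The remaining teardown is handled by
			 * cgroup_destroy_root().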
5157 */ 5158 cgroup_destroy_root(cgrp->root); 5159 } 5160 } 5161 } 5162 5163 static void css_release_work_fn(struct work_struct *work) 5164 { 5165 struct cgroup_subsys_state *css = 5166 container_of(work, struct cgroup_subsys_state, destroy_work); 5167 struct cgroup_subsys *ss = css->ss; 5168 struct cgroup *cgrp = css->cgroup; 5169 5170 mutex_lock(&cgroup_mutex); 5171 5172 css->flags |= CSS_RELEASED; 5173 list_del_rcu(&css->sibling); 5174 5175 if (ss) { 5176 /* css release path */ 5177 if (!list_empty(&css->rstat_css_node)) { 5178 cgroup_rstat_flush(cgrp); 5179 list_del_rcu(&css->rstat_css_node); 5180 } 5181 5182 cgroup_idr_replace(&ss->css_idr, NULL, css->id); 5183 if (ss->css_released) 5184 ss->css_released(css); 5185 } else { 5186 struct cgroup *tcgrp; 5187 5188 /* cgroup release path */ 5189 TRACE_CGROUP_PATH(release, cgrp); 5190 5191 cgroup_rstat_flush(cgrp); 5192 5193 spin_lock_irq(&css_set_lock); 5194 for (tcgrp = cgroup_parent(cgrp); tcgrp; 5195 tcgrp = cgroup_parent(tcgrp)) 5196 tcgrp->nr_dying_descendants--; 5197 spin_unlock_irq(&css_set_lock); 5198 5199 /* 5200 * There are two control paths which try to determine 5201 * cgroup from dentry without going through kernfs - 5202 * cgroupstats_build() and css_tryget_online_from_dir(). 5203 * Those are supported by RCU protecting clearing of 5204 * cgrp->kn->priv backpointer. 5205 */ 5206 if (cgrp->kn) 5207 RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, 5208 NULL); 5209 } 5210 5211 mutex_unlock(&cgroup_mutex); 5212 5213 INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn); 5214 queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork); 5215 } 5216 5217 static void css_release(struct percpu_ref *ref) 5218 { 5219 struct cgroup_subsys_state *css = 5220 container_of(ref, struct cgroup_subsys_state, refcnt); 5221 5222 INIT_WORK(&css->destroy_work, css_release_work_fn); 5223 queue_work(cgroup_destroy_wq, &css->destroy_work); 5224 } 5225 5226 static void init_and_link_css(struct cgroup_subsys_state *css, 5227 struct cgroup_subsys *ss, struct cgroup *cgrp) 5228 { 5229 lockdep_assert_held(&cgroup_mutex); 5230 5231 cgroup_get_live(cgrp); 5232 5233 memset(css, 0, sizeof(*css)); 5234 css->cgroup = cgrp; 5235 css->ss = ss; 5236 css->id = -1; 5237 INIT_LIST_HEAD(&css->sibling); 5238 INIT_LIST_HEAD(&css->children); 5239 INIT_LIST_HEAD(&css->rstat_css_node); 5240 css->serial_nr = css_serial_nr_next++; 5241 atomic_set(&css->online_cnt, 0); 5242 5243 if (cgroup_parent(cgrp)) { 5244 css->parent = cgroup_css(cgroup_parent(cgrp), ss); 5245 css_get(css->parent); 5246 } 5247 5248 if (ss->css_rstat_flush) 5249 list_add_rcu(&css->rstat_css_node, &cgrp->rstat_css_list); 5250 5251 BUG_ON(cgroup_css(cgrp, ss)); 5252 } 5253 5254 /* invoke ->css_online() on a new CSS and mark it online if successful */ 5255 static int online_css(struct cgroup_subsys_state *css) 5256 { 5257 struct cgroup_subsys *ss = css->ss; 5258 int ret = 0; 5259 5260 lockdep_assert_held(&cgroup_mutex); 5261 5262 if (ss->css_online) 5263 ret = ss->css_online(css); 5264 if (!ret) { 5265 css->flags |= CSS_ONLINE; 5266 rcu_assign_pointer(css->cgroup->subsys[ss->id], css); 5267 5268 atomic_inc(&css->online_cnt); 5269 if (css->parent) 5270 atomic_inc(&css->parent->online_cnt); 5271 } 5272 return ret; 5273 } 5274 5275 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */ 5276 static void offline_css(struct cgroup_subsys_state *css) 5277 { 5278 struct cgroup_subsys *ss = css->ss; 5279 5280 lockdep_assert_held(&cgroup_mutex); 5281 5282 if (!(css->flags & CSS_ONLINE)) 5283 
return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);

	wake_up_all(&css->cgroup->offline_waitq);
}

/**
 * css_create - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp.  This function doesn't create the
 * interface files.  Returns the new css on success and ERR_PTR(-errno) on
 * failure.
 */
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
					      struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(parent_css);
	if (!css)
		css = ERR_PTR(-ENOMEM);
	if (IS_ERR(css))
		return css;

	init_and_link_css(css, ss, cgrp);

	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
	if (err)
		goto err_free_css;

	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
		goto err_free_css;
	css->id = err;

	/* @css is ready to be brought online now, make it visible */
	list_add_tail_rcu(&css->sibling, &parent_css->children);
	cgroup_idr_replace(&ss->css_idr, css, css->id);

	err = online_css(css);
	if (err)
		goto err_list_del;

	return css;

err_list_del:
	list_del_rcu(&css->sibling);
err_free_css:
	list_del_rcu(&css->rstat_css_node);
	INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
	queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
	return ERR_PTR(err);
}

/*
 * The returned cgroup is fully initialized including its control mask,
 * but its interface files haven't been created yet and it doesn't have
 * the control mask applied.
 */
static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
				    umode_t mode)
{
	struct cgroup_root *root = parent->root;
	struct cgroup *cgrp, *tcgrp;
	struct kernfs_node *kn;
	int level = parent->level + 1;
	int ret;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(struct_size(cgrp, ancestor_ids, (level + 1)),
		       GFP_KERNEL);
	if (!cgrp)
		return ERR_PTR(-ENOMEM);

	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
	if (ret)
		goto out_free_cgrp;

	ret = cgroup_rstat_init(cgrp);
	if (ret)
		goto out_cancel_ref;

	/* create the directory */
	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		goto out_stat_exit;
	}
	cgrp->kn = kn;

	init_cgroup_housekeeping(cgrp);

	cgrp->self.parent = &parent->self;
	cgrp->root = root;
	cgrp->level = level;

	ret = psi_cgroup_alloc(cgrp);
	if (ret)
		goto out_kernfs_remove;

	ret = cgroup_bpf_inherit(cgrp);
	if (ret)
		goto out_psi_free;

	/*
	 * A new cgroup inherits the effective freeze counter, and
	 * if the parent has to be frozen, the child has to be too.
5402 */ 5403 cgrp->freezer.e_freeze = parent->freezer.e_freeze; 5404 if (cgrp->freezer.e_freeze) { 5405 /* 5406 * Set the CGRP_FREEZE flag, so when a process will be 5407 * attached to the child cgroup, it will become frozen. 5408 * At this point the new cgroup is unpopulated, so we can 5409 * consider it frozen immediately. 5410 */ 5411 set_bit(CGRP_FREEZE, &cgrp->flags); 5412 set_bit(CGRP_FROZEN, &cgrp->flags); 5413 } 5414 5415 spin_lock_irq(&css_set_lock); 5416 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { 5417 cgrp->ancestor_ids[tcgrp->level] = cgroup_id(tcgrp); 5418 5419 if (tcgrp != cgrp) { 5420 tcgrp->nr_descendants++; 5421 5422 /* 5423 * If the new cgroup is frozen, all ancestor cgroups 5424 * get a new frozen descendant, but their state can't 5425 * change because of this. 5426 */ 5427 if (cgrp->freezer.e_freeze) 5428 tcgrp->freezer.nr_frozen_descendants++; 5429 } 5430 } 5431 spin_unlock_irq(&css_set_lock); 5432 5433 if (notify_on_release(parent)) 5434 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); 5435 5436 if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) 5437 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); 5438 5439 cgrp->self.serial_nr = css_serial_nr_next++; 5440 5441 /* allocation complete, commit to creation */ 5442 list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children); 5443 atomic_inc(&root->nr_cgrps); 5444 cgroup_get_live(parent); 5445 5446 /* 5447 * On the default hierarchy, a child doesn't automatically inherit 5448 * subtree_control from the parent. Each is configured manually. 5449 */ 5450 if (!cgroup_on_dfl(cgrp)) 5451 cgrp->subtree_control = cgroup_control(cgrp); 5452 5453 cgroup_propagate_control(cgrp); 5454 5455 return cgrp; 5456 5457 out_psi_free: 5458 psi_cgroup_free(cgrp); 5459 out_kernfs_remove: 5460 kernfs_remove(cgrp->kn); 5461 out_stat_exit: 5462 cgroup_rstat_exit(cgrp); 5463 out_cancel_ref: 5464 percpu_ref_exit(&cgrp->self.refcnt); 5465 out_free_cgrp: 5466 kfree(cgrp); 5467 return ERR_PTR(ret); 5468 } 5469 5470 static bool cgroup_check_hierarchy_limits(struct cgroup *parent) 5471 { 5472 struct cgroup *cgroup; 5473 int ret = false; 5474 int level = 1; 5475 5476 lockdep_assert_held(&cgroup_mutex); 5477 5478 for (cgroup = parent; cgroup; cgroup = cgroup_parent(cgroup)) { 5479 if (cgroup->nr_descendants >= cgroup->max_descendants) 5480 goto fail; 5481 5482 if (level > cgroup->max_depth) 5483 goto fail; 5484 5485 level++; 5486 } 5487 5488 ret = true; 5489 fail: 5490 return ret; 5491 } 5492 5493 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) 5494 { 5495 struct cgroup *parent, *cgrp; 5496 int ret; 5497 5498 /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */ 5499 if (strchr(name, '\n')) 5500 return -EINVAL; 5501 5502 parent = cgroup_kn_lock_live(parent_kn, false); 5503 if (!parent) 5504 return -ENODEV; 5505 5506 if (!cgroup_check_hierarchy_limits(parent)) { 5507 ret = -EAGAIN; 5508 goto out_unlock; 5509 } 5510 5511 cgrp = cgroup_create(parent, name, mode); 5512 if (IS_ERR(cgrp)) { 5513 ret = PTR_ERR(cgrp); 5514 goto out_unlock; 5515 } 5516 5517 /* 5518 * This extra ref will be put in cgroup_free_fn() and guarantees 5519 * that @cgrp->kn is always accessible. 
5520 */ 5521 kernfs_get(cgrp->kn); 5522 5523 ret = cgroup_kn_set_ugid(cgrp->kn); 5524 if (ret) 5525 goto out_destroy; 5526 5527 ret = css_populate_dir(&cgrp->self); 5528 if (ret) 5529 goto out_destroy; 5530 5531 ret = cgroup_apply_control_enable(cgrp); 5532 if (ret) 5533 goto out_destroy; 5534 5535 TRACE_CGROUP_PATH(mkdir, cgrp); 5536 5537 /* let's create and online css's */ 5538 kernfs_activate(cgrp->kn); 5539 5540 ret = 0; 5541 goto out_unlock; 5542 5543 out_destroy: 5544 cgroup_destroy_locked(cgrp); 5545 out_unlock: 5546 cgroup_kn_unlock(parent_kn); 5547 return ret; 5548 } 5549 5550 /* 5551 * This is called when the refcnt of a css is confirmed to be killed. 5552 * css_tryget_online() is now guaranteed to fail. Tell the subsystem to 5553 * initiate destruction and put the css ref from kill_css(). 5554 */ 5555 static void css_killed_work_fn(struct work_struct *work) 5556 { 5557 struct cgroup_subsys_state *css = 5558 container_of(work, struct cgroup_subsys_state, destroy_work); 5559 5560 mutex_lock(&cgroup_mutex); 5561 5562 do { 5563 offline_css(css); 5564 css_put(css); 5565 /* @css can't go away while we're holding cgroup_mutex */ 5566 css = css->parent; 5567 } while (css && atomic_dec_and_test(&css->online_cnt)); 5568 5569 mutex_unlock(&cgroup_mutex); 5570 } 5571 5572 /* css kill confirmation processing requires process context, bounce */ 5573 static void css_killed_ref_fn(struct percpu_ref *ref) 5574 { 5575 struct cgroup_subsys_state *css = 5576 container_of(ref, struct cgroup_subsys_state, refcnt); 5577 5578 if (atomic_dec_and_test(&css->online_cnt)) { 5579 INIT_WORK(&css->destroy_work, css_killed_work_fn); 5580 queue_work(cgroup_destroy_wq, &css->destroy_work); 5581 } 5582 } 5583 5584 /** 5585 * kill_css - destroy a css 5586 * @css: css to destroy 5587 * 5588 * This function initiates destruction of @css by removing cgroup interface 5589 * files and putting its base reference. ->css_offline() will be invoked 5590 * asynchronously once css_tryget_online() is guaranteed to fail and when 5591 * the reference count reaches zero, @css will be released. 5592 */ 5593 static void kill_css(struct cgroup_subsys_state *css) 5594 { 5595 lockdep_assert_held(&cgroup_mutex); 5596 5597 if (css->flags & CSS_DYING) 5598 return; 5599 5600 css->flags |= CSS_DYING; 5601 5602 /* 5603 * This must happen before css is disassociated with its cgroup. 5604 * See seq_css() for details. 5605 */ 5606 css_clear_dir(css); 5607 5608 /* 5609 * Killing would put the base ref, but we need to keep it alive 5610 * until after ->css_offline(). 5611 */ 5612 css_get(css); 5613 5614 /* 5615 * cgroup core guarantees that, by the time ->css_offline() is 5616 * invoked, no new css reference will be given out via 5617 * css_tryget_online(). We can't simply call percpu_ref_kill() and 5618 * proceed to offlining css's because percpu_ref_kill() doesn't 5619 * guarantee that the ref is seen as killed on all CPUs on return. 5620 * 5621 * Use percpu_ref_kill_and_confirm() to get notifications as each 5622 * css is confirmed to be seen as killed on all CPUs. 5623 */ 5624 percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn); 5625 } 5626 5627 /** 5628 * cgroup_destroy_locked - the first stage of cgroup destruction 5629 * @cgrp: cgroup to be destroyed 5630 * 5631 * css's make use of percpu refcnts whose killing latency shouldn't be 5632 * exposed to userland and are RCU protected. Also, cgroup core needs to 5633 * guarantee that css_tryget_online() won't succeed by the time 5634 * ->css_offline() is invoked. 
To satisfy all the requirements,
 * destruction is implemented in the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct cgroup *tcgrp, *parent = cgroup_parent(cgrp);
	struct cgroup_subsys_state *css;
	struct cgrp_cset_link *link;
	int ssid;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Only migration can raise populated from zero and we're already
	 * holding cgroup_mutex.
	 */
	if (cgroup_is_populated(cgrp))
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test emptiness
	 * of ->self.children as dead children linger on it while being
	 * drained; otherwise, "rmdir parent/child parent" may fail.
	 */
	if (css_has_online_children(&cgrp->self))
		return -EBUSY;

	/*
	 * Mark @cgrp and the associated csets dead.  The former prevents
	 * further task migration and child creation by disabling
	 * cgroup_lock_live_group().  The latter makes the csets ignored by
	 * the migration path.
	 */
	cgrp->self.flags &= ~CSS_ONLINE;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		link->cset->dead = true;
	spin_unlock_irq(&css_set_lock);

	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
		kill_css(css);

	/* clear and remove @cgrp dir, @cgrp has an extra ref on its kn */
	css_clear_dir(&cgrp->self);
	kernfs_remove(cgrp->kn);

	if (cgroup_is_threaded(cgrp))
		parent->nr_threaded_children--;

	spin_lock_irq(&css_set_lock);
	for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
		tcgrp->nr_descendants--;
		tcgrp->nr_dying_descendants++;
		/*
		 * If the dying cgroup is frozen, decrease the frozen
		 * descendants counters of its ancestor cgroups.
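		 * (This mirrors the increment done in cgroup_create().)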
5707 */ 5708 if (test_bit(CGRP_FROZEN, &cgrp->flags)) 5709 tcgrp->freezer.nr_frozen_descendants--; 5710 } 5711 spin_unlock_irq(&css_set_lock); 5712 5713 cgroup1_check_for_release(parent); 5714 5715 cgroup_bpf_offline(cgrp); 5716 5717 /* put the base reference */ 5718 percpu_ref_kill(&cgrp->self.refcnt); 5719 5720 return 0; 5721 }; 5722 5723 int cgroup_rmdir(struct kernfs_node *kn) 5724 { 5725 struct cgroup *cgrp; 5726 int ret = 0; 5727 5728 cgrp = cgroup_kn_lock_live(kn, false); 5729 if (!cgrp) 5730 return 0; 5731 5732 ret = cgroup_destroy_locked(cgrp); 5733 if (!ret) 5734 TRACE_CGROUP_PATH(rmdir, cgrp); 5735 5736 cgroup_kn_unlock(kn); 5737 return ret; 5738 } 5739 5740 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { 5741 .show_options = cgroup_show_options, 5742 .mkdir = cgroup_mkdir, 5743 .rmdir = cgroup_rmdir, 5744 .show_path = cgroup_show_path, 5745 }; 5746 5747 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early) 5748 { 5749 struct cgroup_subsys_state *css; 5750 5751 pr_debug("Initializing cgroup subsys %s\n", ss->name); 5752 5753 mutex_lock(&cgroup_mutex); 5754 5755 idr_init(&ss->css_idr); 5756 INIT_LIST_HEAD(&ss->cfts); 5757 5758 /* Create the root cgroup state for this subsystem */ 5759 ss->root = &cgrp_dfl_root; 5760 css = ss->css_alloc(NULL); 5761 /* We don't handle early failures gracefully */ 5762 BUG_ON(IS_ERR(css)); 5763 init_and_link_css(css, ss, &cgrp_dfl_root.cgrp); 5764 5765 /* 5766 * Root csses are never destroyed and we can't initialize 5767 * percpu_ref during early init. Disable refcnting. 5768 */ 5769 css->flags |= CSS_NO_REF; 5770 5771 if (early) { 5772 /* allocation can't be done safely during early init */ 5773 css->id = 1; 5774 } else { 5775 css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL); 5776 BUG_ON(css->id < 0); 5777 } 5778 5779 /* Update the init_css_set to contain a subsys 5780 * pointer to this state - since the subsystem is 5781 * newly registered, all tasks and hence the 5782 * init_css_set is in the subsystem's root cgroup. */ 5783 init_css_set.subsys[ss->id] = css; 5784 5785 have_fork_callback |= (bool)ss->fork << ss->id; 5786 have_exit_callback |= (bool)ss->exit << ss->id; 5787 have_release_callback |= (bool)ss->release << ss->id; 5788 have_canfork_callback |= (bool)ss->can_fork << ss->id; 5789 5790 /* At system boot, before all subsystems have been 5791 * registered, no tasks have been forked, so we don't 5792 * need to invoke fork callbacks here. */ 5793 BUG_ON(!list_empty(&init_task.tasks)); 5794 5795 BUG_ON(online_css(css)); 5796 5797 mutex_unlock(&cgroup_mutex); 5798 } 5799 5800 /** 5801 * cgroup_init_early - cgroup initialization at system boot 5802 * 5803 * Initialize cgroups at system boot, and initialize any 5804 * subsystems that request early init. 
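 * (The cpu controller, for instance, requests early init so that it is
 * usable while the boot scheduler is being set up.)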
 */
int __init cgroup_init_early(void)
{
	static struct cgroup_fs_context __initdata ctx;
	struct cgroup_subsys *ss;
	int i;

	ctx.root = &cgrp_dfl_root;
	init_cgroup_root(&ctx);
	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;

	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);

	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];
		if (!ss->legacy_name)
			ss->legacy_name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss, true);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	int ssid;

	BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));

	cgroup_rstat_boot();

	/*
	 * The latency of the synchronize_rcu() is too high for cgroups,
	 * avoid it at the cost of forcing all readers into the slow path.
	 */
	rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);

	get_user_ns(init_cgroup_ns.user_ns);

	mutex_lock(&cgroup_mutex);

	/*
	 * Add init_css_set to the hash table so that dfl_root can link to
	 * it during init.
	 */
	hash_add(css_set_table, &init_css_set.hlist,
		 css_set_hash(init_css_set.subsys));

	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));

	mutex_unlock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (ss->early_init) {
			struct cgroup_subsys_state *css =
				init_css_set.subsys[ss->id];

			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
						   GFP_KERNEL);
			BUG_ON(css->id < 0);
		} else {
			cgroup_init_subsys(ss, false);
		}

		list_add_tail(&init_css_set.e_cset_node[ssid],
			      &cgrp_dfl_root.cgrp.e_csets[ssid]);

		/*
		 * Setting dfl_root subsys_mask needs to consider the
		 * disabled flag and cftype registration needs kmalloc,
		 * both of which aren't available during early_init.
		 */
		if (!cgroup_ssid_enabled(ssid))
			continue;

		if (cgroup1_ssid_disabled(ssid))
			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
			       ss->name);

		cgrp_dfl_root.subsys_mask |= 1 << ss->id;

		/* implicit controllers must be threaded too */
		WARN_ON(ss->implicit_on_dfl && !ss->threaded);

		if (ss->implicit_on_dfl)
			cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
		else if (!ss->dfl_cftypes)
			cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;

		if (ss->threaded)
			cgrp_dfl_threaded_ss_mask |= 1 << ss->id;

		if (ss->dfl_cftypes == ss->legacy_cftypes) {
			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
		} else {
			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
		}

		if (ss->bind)
			ss->bind(init_css_set.subsys[ssid]);

		mutex_lock(&cgroup_mutex);
		css_populate_dir(init_css_set.subsys[ssid]);
		mutex_unlock(&cgroup_mutex);
	}

	/* init_css_set.subsys[] has been updated, re-hash */
	hash_del(&init_css_set.hlist);
	hash_add(css_set_table, &init_css_set.hlist,
		 css_set_hash(init_css_set.subsys));

	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
	WARN_ON(register_filesystem(&cgroup_fs_type));
	WARN_ON(register_filesystem(&cgroup2_fs_type));
	WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
#ifdef CONFIG_CPUSETS
	WARN_ON(register_filesystem(&cpuset_fs_type));
#endif

	return 0;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing the destruction path in
	 * parallel. A good chunk of it is serialized with cgroup_mutex
	 * anyway. Use 1 for @max_active.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
	BUG_ON(!cgroup_destroy_wq);
	return 0;
}
core_initcall(cgroup_wq_init);

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{
	struct kernfs_node *kn;

	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
	if (!kn)
		return;
	kernfs_path(kn, buf, buflen);
	kernfs_put(kn);
}

/*
 * cgroup_get_from_id - get the cgroup associated with cgroup id
 * @id: cgroup id
 *
 * On success return the cgrp, on failure return NULL.
 */
struct cgroup *cgroup_get_from_id(u64 id)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp = NULL;

	kn = kernfs_find_and_get_node_by_id(cgrp_dfl_root.kf_root, id);
	if (!kn)
		goto out;

	rcu_read_lock();

	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (cgrp && !cgroup_tryget(cgrp))
		cgrp = NULL;

	rcu_read_unlock();

	kernfs_put(kn);
out:
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_id);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
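 *  - Each line is "hierarchy-id:controller-list:path"; illustrative
 *    (made-up) output:
 *      1:name=systemd:/user.slice/user-1000.slice
 *      0::/user.slice/user-1000.slice/session-2.scope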
 */
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	int retval;
	struct cgroup_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	mutex_lock(&cgroup_mutex);
	spin_lock_irq(&css_set_lock);

	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

		if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
			continue;

		seq_printf(m, "%d:", root->hierarchy_id);
		if (root != &cgrp_dfl_root)
			for_each_subsys(ss, ssid)
				if (root->subsys_mask & (1 << ssid))
					seq_printf(m, "%s%s", count++ ? "," : "",
						   ss->legacy_name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');

		cgrp = task_cgroup_from_root(tsk, root);

		/*
		 * On traditional hierarchies, all zombie tasks show up as
		 * belonging to the root cgroup. On the default hierarchy,
		 * while a zombie doesn't show up in "cgroup.procs" and
		 * thus can't be migrated, its /proc/PID/cgroup keeps
		 * reporting the cgroup it belonged to before exiting. If
		 * the cgroup is removed before the zombie is reaped,
		 * " (deleted)" is appended to the cgroup path.
		 */
		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
						current->nsproxy->cgroup_ns);
			if (retval >= PATH_MAX)
				retval = -ENAMETOOLONG;
			if (retval < 0)
				goto out_unlock;

			seq_puts(m, buf);
		} else {
			seq_puts(m, "/");
		}

		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
			seq_puts(m, " (deleted)\n");
		else
			seq_putc(m, '\n');
	}

	retval = 0;
out_unlock:
	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
out:
	return retval;
}

/**
 * cgroup_fork - initialize cgroup related fields during copy_process()
 * @child: pointer to task_struct of the child process
 *
 * A task is associated with the init_css_set until cgroup_post_fork()
 * attaches it to the target css_set.
 */
void cgroup_fork(struct task_struct *child)
{
	RCU_INIT_POINTER(child->cgroups, &init_css_set);
	INIT_LIST_HEAD(&child->cg_list);
}

static struct cgroup *cgroup_get_from_file(struct file *f)
{
	struct cgroup_subsys_state *css;
	struct cgroup *cgrp;

	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
	if (IS_ERR(css))
		return ERR_CAST(css);

	cgrp = css->cgroup;
	if (!cgroup_on_dfl(cgrp)) {
		cgroup_put(cgrp);
		return ERR_PTR(-EBADF);
	}

	return cgrp;
}

/**
 * cgroup_css_set_fork - find or create a css_set for a child process
 * @kargs: the arguments passed to create the child process
 *
 * This function finds or creates a new css_set which the child
 * process will be attached to in cgroup_post_fork(). By default,
 * the child process will be given the same css_set as its parent.
 *
 * If CLONE_INTO_CGROUP is specified this function will try to find an
 * existing css_set which includes the requested cgroup and if not create
 * a new css_set that the child will be attached to later. If this function
 * succeeds it will hold cgroup_threadgroup_rwsem on return.
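 *
 * For illustration only, userspace typically reaches the
 * CLONE_INTO_CGROUP path via clone3() with a cgroup directory fd
 * (a hypothetical sketch; the path name is made up):
 *
 *	int dfd = open("/sys/fs/cgroup/mygrp", O_DIRECTORY | O_CLOEXEC);
 *	struct clone_args args = {
 *		.flags		= CLONE_INTO_CGROUP,
 *		.exit_signal	= SIGCHLD,
 *		.cgroup		= dfd,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *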
 * If CLONE_INTO_CGROUP is requested this function will grab cgroup_mutex
 * before grabbing cgroup_threadgroup_rwsem and will hold a reference
 * to the target cgroup.
 */
static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
	__acquires(&cgroup_mutex) __acquires(&cgroup_threadgroup_rwsem)
{
	int ret;
	struct cgroup *dst_cgrp = NULL;
	struct css_set *cset;
	struct super_block *sb;
	struct file *f;

	if (kargs->flags & CLONE_INTO_CGROUP)
		mutex_lock(&cgroup_mutex);

	cgroup_threadgroup_change_begin(current);

	spin_lock_irq(&css_set_lock);
	cset = task_css_set(current);
	get_css_set(cset);
	spin_unlock_irq(&css_set_lock);

	if (!(kargs->flags & CLONE_INTO_CGROUP)) {
		kargs->cset = cset;
		return 0;
	}

	f = fget_raw(kargs->cgroup);
	if (!f) {
		ret = -EBADF;
		goto err;
	}
	sb = f->f_path.dentry->d_sb;

	dst_cgrp = cgroup_get_from_file(f);
	if (IS_ERR(dst_cgrp)) {
		ret = PTR_ERR(dst_cgrp);
		dst_cgrp = NULL;
		goto err;
	}

	if (cgroup_is_dead(dst_cgrp)) {
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Verify that the target cgroup is writable for us. This is
	 * usually done by the vfs layer but since we're not going through
	 * the vfs layer here we need to do it "manually".
	 */
	ret = cgroup_may_write(dst_cgrp, sb);
	if (ret)
		goto err;

	/*
	 * Spawning a task directly into a cgroup works by passing a file
	 * descriptor to the target cgroup directory. This can even be an O_PATH
	 * file descriptor. But it can never be a cgroup.procs file descriptor.
	 * This was done on purpose so spawning into a cgroup could be
	 * conceptualized as an atomic
	 *
	 *   fd = openat(dfd_cgroup, "cgroup.procs", ...);
	 *   write(fd, <child-pid>, ...);
	 *
	 * sequence, i.e. it's a shorthand for the caller opening and writing
	 * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
	 * to always use the caller's credentials.
	 */
	ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
					!(kargs->flags & CLONE_THREAD),
					current->nsproxy->cgroup_ns);
	if (ret)
		goto err;

	kargs->cset = find_css_set(cset, dst_cgrp);
	if (!kargs->cset) {
		ret = -ENOMEM;
		goto err;
	}

	put_css_set(cset);
	fput(f);
	kargs->cgrp = dst_cgrp;
	return ret;

err:
	cgroup_threadgroup_change_end(current);
	mutex_unlock(&cgroup_mutex);
	if (f)
		fput(f);
	if (dst_cgrp)
		cgroup_put(dst_cgrp);
	put_css_set(cset);
	if (kargs->cset)
		put_css_set(kargs->cset);
	return ret;
}

/**
 * cgroup_css_set_put_fork - drop references we took during fork
 * @kargs: the arguments passed to create the child process
 *
 * Drop references to the prepared css_set and target cgroup if
 * CLONE_INTO_CGROUP was requested.
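 * This also releases cgroup_threadgroup_rwsem and, if CLONE_INTO_CGROUP
 * was requested, cgroup_mutex, undoing what cgroup_css_set_fork()
 * acquired.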
 */
static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
{
	cgroup_threadgroup_change_end(current);

	if (kargs->flags & CLONE_INTO_CGROUP) {
		struct cgroup *cgrp = kargs->cgrp;
		struct css_set *cset = kargs->cset;

		mutex_unlock(&cgroup_mutex);

		if (cset) {
			put_css_set(cset);
			kargs->cset = NULL;
		}

		if (cgrp) {
			cgroup_put(cgrp);
			kargs->cgrp = NULL;
		}
	}
}

/**
 * cgroup_can_fork - called on a new task before the process is exposed
 * @child: the child process
 * @kargs: the arguments passed to create the child process
 *
 * This prepares a new css_set for the child process which the child will
 * be attached to in cgroup_post_fork(), and calls the subsystem
 * can_fork() callbacks. If any of those callbacks returns an error, the
 * fork aborts with that error code. This allows a cgroup subsystem to
 * conditionally allow or deny new forks.
 */
int cgroup_can_fork(struct task_struct *child, struct kernel_clone_args *kargs)
{
	struct cgroup_subsys *ss;
	int i, j, ret;

	ret = cgroup_css_set_fork(kargs);
	if (ret)
		return ret;

	do_each_subsys_mask(ss, i, have_canfork_callback) {
		ret = ss->can_fork(child, kargs->cset);
		if (ret)
			goto out_revert;
	} while_each_subsys_mask();

	return 0;

out_revert:
	for_each_subsys(ss, j) {
		if (j >= i)
			break;
		if (ss->cancel_fork)
			ss->cancel_fork(child, kargs->cset);
	}

	cgroup_css_set_put_fork(kargs);

	return ret;
}

/**
 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
 * @child: the child process
 * @kargs: the arguments passed to create the child process
 *
 * This calls the cancel_fork() callbacks if a fork failed *after*
 * cgroup_can_fork() succeeded and cleans up references we took to
 * prepare a new css_set for the child process in cgroup_can_fork().
 */
void cgroup_cancel_fork(struct task_struct *child,
			struct kernel_clone_args *kargs)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i)
		if (ss->cancel_fork)
			ss->cancel_fork(child, kargs->cset);

	cgroup_css_set_put_fork(kargs);
}

/**
 * cgroup_post_fork - finalize cgroup setup for the child process
 * @child: the child process
 * @kargs: the arguments passed to create the child process
 *
 * Attach the child process to its css_set, calling the subsystem fork()
 * callbacks.
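 *
 * If the destination cgroup is being frozen or killed (CGRP_FREEZE or
 * CGRP_KILL set), the new task is marked to be frozen or is sent
 * SIGKILL right after it has been fully set up.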
 */
void cgroup_post_fork(struct task_struct *child,
		      struct kernel_clone_args *kargs)
	__releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
{
	unsigned long cgrp_flags = 0;
	bool kill = false;
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	cset = kargs->cset;
	kargs->cset = NULL;

	spin_lock_irq(&css_set_lock);

	/* init tasks are special, only link regular threads */
	if (likely(child->pid)) {
		if (kargs->cgrp)
			cgrp_flags = kargs->cgrp->flags;
		else
			cgrp_flags = cset->dfl_cgrp->flags;

		WARN_ON_ONCE(!list_empty(&child->cg_list));
		cset->nr_tasks++;
		css_set_move_task(child, NULL, cset, false);
	} else {
		put_css_set(cset);
		cset = NULL;
	}

	if (!(child->flags & PF_KTHREAD)) {
		if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) {
			/*
			 * If the cgroup has to be frozen, the new task has
			 * to be frozen too. Let's set the JOBCTL_TRAP_FREEZE
			 * jobctl bit to get the task into the frozen state.
			 */
			spin_lock(&child->sighand->siglock);
			WARN_ON_ONCE(child->frozen);
			child->jobctl |= JOBCTL_TRAP_FREEZE;
			spin_unlock(&child->sighand->siglock);

			/*
			 * Calling cgroup_update_frozen() isn't required here,
			 * because it will be called anyway a bit later from
			 * do_freezer_trap(). So we avoid cgroup's transient
			 * switch from the frozen state and back.
			 */
		}

		/*
		 * If the cgroup is to be killed, notice it now and take the
		 * child down right after we finished preparing it for
		 * userspace.
		 */
		kill = test_bit(CGRP_KILL, &cgrp_flags);
	}

	spin_unlock_irq(&css_set_lock);

	/*
	 * Call ss->fork(). This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	do_each_subsys_mask(ss, i, have_fork_callback) {
		ss->fork(child);
	} while_each_subsys_mask();

	/* Make the new cset the root_cset of the new cgroup namespace. */
	if (kargs->flags & CLONE_NEWCGROUP) {
		struct css_set *rcset = child->nsproxy->cgroup_ns->root_cset;

		get_css_set(cset);
		child->nsproxy->cgroup_ns->root_cset = cset;
		put_css_set(rcset);
	}

	/* Cgroup has to be killed so take down child immediately. */
	if (unlikely(kill))
		do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID);

	cgroup_css_set_put_fork(kargs);
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk.
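 *
 * The task remains on its css_set's dying_tasks list until
 * cgroup_release() and the css_set reference itself is only dropped in
 * cgroup_free().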
6413 * 6414 */ 6415 void cgroup_exit(struct task_struct *tsk) 6416 { 6417 struct cgroup_subsys *ss; 6418 struct css_set *cset; 6419 int i; 6420 6421 spin_lock_irq(&css_set_lock); 6422 6423 WARN_ON_ONCE(list_empty(&tsk->cg_list)); 6424 cset = task_css_set(tsk); 6425 css_set_move_task(tsk, cset, NULL, false); 6426 list_add_tail(&tsk->cg_list, &cset->dying_tasks); 6427 cset->nr_tasks--; 6428 6429 WARN_ON_ONCE(cgroup_task_frozen(tsk)); 6430 if (unlikely(!(tsk->flags & PF_KTHREAD) && 6431 test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags))) 6432 cgroup_update_frozen(task_dfl_cgroup(tsk)); 6433 6434 spin_unlock_irq(&css_set_lock); 6435 6436 /* see cgroup_post_fork() for details */ 6437 do_each_subsys_mask(ss, i, have_exit_callback) { 6438 ss->exit(tsk); 6439 } while_each_subsys_mask(); 6440 } 6441 6442 void cgroup_release(struct task_struct *task) 6443 { 6444 struct cgroup_subsys *ss; 6445 int ssid; 6446 6447 do_each_subsys_mask(ss, ssid, have_release_callback) { 6448 ss->release(task); 6449 } while_each_subsys_mask(); 6450 6451 spin_lock_irq(&css_set_lock); 6452 css_set_skip_task_iters(task_css_set(task), task); 6453 list_del_init(&task->cg_list); 6454 spin_unlock_irq(&css_set_lock); 6455 } 6456 6457 void cgroup_free(struct task_struct *task) 6458 { 6459 struct css_set *cset = task_css_set(task); 6460 put_css_set(cset); 6461 } 6462 6463 static int __init cgroup_disable(char *str) 6464 { 6465 struct cgroup_subsys *ss; 6466 char *token; 6467 int i; 6468 6469 while ((token = strsep(&str, ",")) != NULL) { 6470 if (!*token) 6471 continue; 6472 6473 for_each_subsys(ss, i) { 6474 if (strcmp(token, ss->name) && 6475 strcmp(token, ss->legacy_name)) 6476 continue; 6477 6478 static_branch_disable(cgroup_subsys_enabled_key[i]); 6479 pr_info("Disabling %s control group subsystem\n", 6480 ss->name); 6481 } 6482 6483 for (i = 0; i < OPT_FEATURE_COUNT; i++) { 6484 if (strcmp(token, cgroup_opt_feature_names[i])) 6485 continue; 6486 cgroup_feature_disable_mask |= 1 << i; 6487 pr_info("Disabling %s control group feature\n", 6488 cgroup_opt_feature_names[i]); 6489 break; 6490 } 6491 } 6492 return 1; 6493 } 6494 __setup("cgroup_disable=", cgroup_disable); 6495 6496 void __init __weak enable_debug_cgroup(void) { } 6497 6498 static int __init enable_cgroup_debug(char *str) 6499 { 6500 cgroup_debug = true; 6501 enable_debug_cgroup(); 6502 return 1; 6503 } 6504 __setup("cgroup_debug", enable_cgroup_debug); 6505 6506 /** 6507 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry 6508 * @dentry: directory dentry of interest 6509 * @ss: subsystem of interest 6510 * 6511 * If @dentry is a directory for a cgroup which has @ss enabled on it, try 6512 * to get the corresponding css and return it. If such css doesn't exist 6513 * or can't be pinned, an ERR_PTR value is returned. 6514 */ 6515 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 6516 struct cgroup_subsys *ss) 6517 { 6518 struct kernfs_node *kn = kernfs_node_from_dentry(dentry); 6519 struct file_system_type *s_type = dentry->d_sb->s_type; 6520 struct cgroup_subsys_state *css = NULL; 6521 struct cgroup *cgrp; 6522 6523 /* is @dentry a cgroup dir? */ 6524 if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) || 6525 !kn || kernfs_type(kn) != KERNFS_DIR) 6526 return ERR_PTR(-EBADF); 6527 6528 rcu_read_lock(); 6529 6530 /* 6531 * This path doesn't originate from kernfs and @kn could already 6532 * have been or be removed at any point. @kn->priv is RCU 6533 * protected for this access. 
	 * See css_release_work_fn() for details.
	 */
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget_online(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return idr_find(&ss->css_idr, id);
}

/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it. Returns pointer to the found cgroup on
 * success, ERR_PTR(-ENOENT) if @path doesn't exist or if the cgroup has
 * already been released and ERR_PTR(-ENOTDIR) if @path points to a
 * non-directory.
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
	struct kernfs_node *kn;
	struct cgroup *cgrp = ERR_PTR(-ENOENT);

	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
	if (!kn)
		goto out;

	if (kernfs_type(kn) != KERNFS_DIR) {
		cgrp = ERR_PTR(-ENOTDIR);
		goto out_kernfs;
	}

	rcu_read_lock();

	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp))
		cgrp = ERR_PTR(-ENOENT);

	rcu_read_unlock();

out_kernfs:
	kernfs_put(kn);
out:
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

/**
 * cgroup_get_from_fd - get a cgroup pointer from a fd
 * @fd: fd obtained by open(cgroup2_dir)
 *
 * Find the cgroup from a fd which should be obtained by opening a
 * cgroup directory. Returns a pointer to the cgroup on success.
 * ERR_PTR is returned if the cgroup cannot be found.
 */
struct cgroup *cgroup_get_from_fd(int fd)
{
	struct cgroup *cgrp;
	struct file *f;

	f = fget_raw(fd);
	if (!f)
		return ERR_PTR(-EBADF);

	cgrp = cgroup_get_from_file(f);
	fput(f);
	return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);

static u64 power_of_ten(int power)
{
	u64 v = 1;

	while (power--)
		v *= 10;
	return v;
}

/**
 * cgroup_parse_float - parse a floating point number
 * @input: input string
 * @dec_shift: number of decimal digits to shift
 * @v: output
 *
 * Parse a decimal floating point number in @input and store the result in
 * @v with decimal point right shifted @dec_shift times. For example, if
 * @input is "12.3456" and @dec_shift is 3, *@v will be set to 12346 (the
 * fractional part is rounded to the closest value, so 12345.6 becomes
 * 12346). Returns 0 on success, -errno otherwise.
 *
 * There's nothing cgroup specific about this function except that it's
 * currently the only user.
 */
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
{
	s64 whole, frac = 0;
	int fstart = 0, fend = 0, flen;

	if (!sscanf(input, "%lld.%n%lld%n", &whole, &fstart, &frac, &fend))
		return -EINVAL;
	if (frac < 0)
		return -EINVAL;

	flen = fend > fstart ? fend - fstart : 0;
	if (flen < dec_shift)
		frac *= power_of_ten(dec_shift - flen);
	else
		frac = DIV_ROUND_CLOSEST_ULL(frac, power_of_ten(flen - dec_shift));

	*v = whole * power_of_ten(dec_shift) + frac;
	return 0;
}

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
	struct cgroup *cgroup;

	rcu_read_lock();
	/* Don't associate the sock with an unrelated interrupted task's cgroup. */
	if (in_interrupt()) {
		cgroup = &cgrp_dfl_root.cgrp;
		cgroup_get(cgroup);
		goto out;
	}

	while (true) {
		struct css_set *cset;

		cset = task_css_set(current);
		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			cgroup = cset->dfl_cgrp;
			break;
		}
		cpu_relax();
	}
out:
	skcd->cgroup = cgroup;
	cgroup_bpf_get(cgroup);
	rcu_read_unlock();
}

void cgroup_sk_clone(struct sock_cgroup_data *skcd)
{
	struct cgroup *cgrp = sock_cgroup_ptr(skcd);

	/*
	 * We might be cloning a socket which is left in an empty
	 * cgroup and the cgroup might have already been rmdir'd.
	 * Don't use cgroup_get_live().
	 */
	cgroup_get(cgrp);
	cgroup_bpf_get(cgrp);
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
	struct cgroup *cgrp = sock_cgroup_ptr(skcd);

	cgroup_bpf_put(cgrp);
	cgroup_put(cgrp);
}

#endif /* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_SYSFS
static ssize_t show_delegatable_files(struct cftype *files, char *buf,
				      ssize_t size, const char *prefix)
{
	struct cftype *cft;
	ssize_t ret = 0;

	for (cft = files; cft && cft->name[0] != '\0'; cft++) {
		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
			continue;

		if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
			continue;

		if (prefix)
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);

		ret += snprintf(buf + ret, size - ret, "%s\n", cft->name);

		if (WARN_ON(ret >= size))
			break;
	}

	return ret;
}

static ssize_t delegate_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	struct cgroup_subsys *ss;
	int ssid;
	ssize_t ret = 0;

	ret = show_delegatable_files(cgroup_base_files, buf, PAGE_SIZE - ret,
				     NULL);

	for_each_subsys(ss, ssid)
		ret += show_delegatable_files(ss->dfl_cftypes, buf + ret,
					      PAGE_SIZE - ret,
					      cgroup_subsys_name[ssid]);

	return ret;
}
static struct kobj_attribute cgroup_delegate_attr = __ATTR_RO(delegate);

static ssize_t features_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return snprintf(buf, PAGE_SIZE,
			"nsdelegate\n"
			"memory_localevents\n"
			"memory_recursiveprot\n");
}
static struct kobj_attribute cgroup_features_attr = __ATTR_RO(features);

static struct attribute *cgroup_sysfs_attrs[] = {
	&cgroup_delegate_attr.attr,
	&cgroup_features_attr.attr,
	NULL,
};

static const struct attribute_group cgroup_sysfs_attr_group = {
	.attrs = cgroup_sysfs_attrs,
	.name = "cgroup",
};

static int __init cgroup_sysfs_init(void)
{
	return sysfs_create_group(kernel_kobj, &cgroup_sysfs_attr_group);
}
subsys_initcall(cgroup_sysfs_init);

#endif /* CONFIG_SYSFS */
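
/*
 * For reference, the attribute group registered above appears as
 * /sys/kernel/cgroup/. Illustrative, configuration-dependent contents
 * (the features lines are exactly what features_show() emits; the
 * delegate list varies with the enabled controllers):
 *
 *	$ cat /sys/kernel/cgroup/features
 *	nsdelegate
 *	memory_localevents
 *	memory_recursiveprot
 *
 *	$ cat /sys/kernel/cgroup/delegate
 *	cgroup.procs
 *	cgroup.threads
 *	cgroup.subtree_control
 *	...
 */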