/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
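 *
 * An illustrative sketch, assuming a caller-supplied do_something()
 * helper (not part of the kernel API):
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		do_something(pos);
 *	rcu_read_unlock();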
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk doesn't apply the same to post-order walks.
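 *
 * Post-order is useful when, e.g., tearing down per-css state bottom-up
 * so that children are visited before their parents.  An illustrative
 * sketch, assuming a caller-supplied teardown_my_state() helper (not part
 * of the kernel API):
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		teardown_my_state(pos);
 *	rcu_read_unlock();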
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is online or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.
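 *
 * Unlike css_tryget(), this fails once offlining of @css has been
 * initiated even if references are still held; see the comment in
 * task_get_css() for a case where css_tryget() must be used instead.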
 * Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
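 *
 * An illustrative sketch with a hypothetical subsystem lock, my_ss_lock
 * (not a kernel symbol), known to be held across attach:
 *
 *	cset = task_css_set_check(task, lockdep_is_held(&my_ss_lock));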
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
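 *
 * (Any context is fine: %true is passed to task_css_check() as the extra
 * condition, which waives the lockdep checks, and the resulting pointer
 * is only compared against the stable root css, never dereferenced.)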
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find the ancestor of @cgrp at @ancestor_level, counting levels from the
 * root, and return a pointer to it.  Return NULL if @cgrp doesn't have an
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of
 * @ancestor.  It follows all the same rules as cgroup_is_descendant() and
 * only applies to the default hierarchy.
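 *
 * An illustrative sketch; an RCU read-side section satisfies the css_set
 * access rules checked by task_css_set():
 *
 *	rcu_read_lock();
 *	under = task_under_cgroup_hierarchy(task, ancestor_cgrp);
 *	rcu_read_unlock();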
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
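	 *
	 * (Pairs with cgroup_init_kthreadd(): clearing ->no_cgroup_migration
	 * makes this kthread eligible for cgroup migration again.)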
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
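 *
 * The cpuacct_*() hooks below charge the legacy (v1) cpuacct controller;
 * the __cgroup_account_cputime*() helpers feed the rstat-based cpu
 * statistics on the default hierarchy.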
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
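	 *
	 * The lowest bit of @skcd->val being set means @skcd carries
	 * prioidx/classid data rather than a cgroup pointer, in which case
	 * the default root cgroup is returned (see sock_cgroup_data in
	 * cgroup-defs.h).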
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* !CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */