/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0), /* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1), /* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2), /* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16), /* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17), /* not on default hierarchy */
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
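/*
 * Illustrative sketch (not part of this header): a controller that wants
 * change notifications on one of its interface files typically embeds a
 * cgroup_file in its css, points cftype->file_offset at it, and later pokes
 * the file with cgroup_file_notify() from linux/cgroup.h.  The my_* names
 * below are hypothetical and only assumed for the example.
 *
 *	struct my_css {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype my_files[] = {
 *		{
 *			.name = "events",
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *			.file_offset = offsetof(struct my_css, events_file),
 *			.seq_show = my_events_show,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 *
 *	// after updating the event counters:
 *	//	cgroup_file_notify(&my_cssp->events_file);
 */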
/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/* flush target list anchored at cgrp->rstat_css_list */
	struct list_head rstat_css_node;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr,
	 * which is used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};
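/*
 * A minimal usage sketch (not part of this header): ->refcnt is normally
 * manipulated through the helpers declared in linux/cgroup.h.  A common
 * pattern for pinning a css found under RCU looks roughly like the
 * following; my_walk_css() and my_cgrp_id are hypothetical.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = task_css(task, my_cgrp_id);	// hypothetical subsystem id
 *	if (css_tryget_online(css)) {
 *		rcu_read_unlock();
 *		my_walk_css(css);		// safe: holds a reference
 *		css_put(css);
 *	} else {
 *		rcu_read_unlock();
 *	}
 */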
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects.  This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem.  This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this cgroup group.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in.  Protected by
	 * css_set_rwsem, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with.  The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all cgroup groups in the same hash
	 * slot.  Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination.  Protected by cgroup_mutex.
	 */
	struct list_head mg_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set.  mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration.  mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to.  Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

struct cgroup_base_stat {
	struct task_cputime cputime;
};

/*
 * rstat - cgroup scalable recursive statistics.  Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_cpu {
	/*
	 * ->bsync protects ->bstat.  These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, a singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_rstat_cpu_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
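/*
 * Rough sketch of how the updated tree above gets built; the authoritative
 * version is cgroup_rstat_updated() in the cgroup core (kernel/cgroup/
 * rstat.c), this is only an illustration.  When a cgroup gets a stat
 * update on a cpu, it and any not-yet-linked ancestors are pushed onto
 * their parents' ->updated_children lists:
 *
 *	for (parent = cgroup_parent(cgrp); parent;
 *	     cgrp = parent, parent = cgroup_parent(cgrp)) {
 *		rstatc = per_cpu_ptr(cgrp->rstat_cpu, cpu);
 *		prstatc = per_cpu_ptr(parent->rstat_cpu, cpu);
 *
 *		if (rstatc->updated_next)	// already on the list
 *			break;
 *
 *		rstatc->updated_next = prstatc->updated_children;
 *		prstatc->updated_children = cgrp;
 *	}
 *
 * A subsequent flush walks and empties only these per-cpu lists instead of
 * the whole subtree.
 */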
struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * idr allocated in-hierarchy ID.
	 *
	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
	 * new cgroup will be assigned the smallest available ID.
	 *
	 * Allocating/Removing ID must be protected by cgroup_mutex.
	 */
	int id;

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descendant tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descendant cgroups.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled.  Controller knobs
	 * are made available iff they're enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsystems disabled will point to css's which are associated with
	 * the closest ancestor which has the subsystem enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;

	/* per-cpu recursive resource statistics */
	struct cgroup_rstat_cpu __percpu *rstat_cpu;
	struct list_head rstat_css_list;

	/* cgroup basic resource statistics */
	struct cgroup_base_stat pending_bstat;	/* pending from children */
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
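/*
 * Illustrative sketch (not part of this header; the real helper is
 * cgroup_is_descendant() in linux/cgroup.h) of how ->level and
 * ->ancestor_ids[] answer "is @cgrp inside @ancestor?" without walking the
 * hierarchy.  my_cgroup_in_subtree() is a hypothetical name used only here.
 *
 *	static inline bool my_cgroup_in_subtree(struct cgroup *cgrp,
 *						struct cgroup *ancestor)
 *	{
 *		if (cgrp->root != ancestor->root ||
 *		    cgrp->level < ancestor->level)
 *			return false;
 *		return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
 *	}
 *
 * Every cgroup records the IDs of all its ancestors (including itself)
 * indexed by level, so a single array lookup suffices.
 */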
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/* for cgrp->ancestor_ids[0] */
	int cgrp_ancestor_id_storage;

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* IDs for cgroups in this hierarchy */
	struct idr cgroup_idr;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer.  Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64().
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace.  Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64().
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lockdep_key;
#endif
};
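/*
 * A minimal sketch (not part of this header) of how a controller typically
 * fills in a cftype array.  All my_* names and MY_WEIGHT_MAX are
 * hypothetical; my_css() stands in for a container_of() wrapper.  Real
 * controllers hand such arrays to cgroup core via cgroup_subsys
 * ->dfl_cftypes / ->legacy_cftypes or cgroup_add_dfl_cftypes() from
 * linux/cgroup.h.
 *
 *	static u64 my_weight_read(struct cgroup_subsys_state *css,
 *				  struct cftype *cft)
 *	{
 *		return my_css(css)->weight;
 *	}
 *
 *	static int my_weight_write(struct cgroup_subsys_state *css,
 *				   struct cftype *cft, u64 val)
 *	{
 *		if (!val || val > MY_WEIGHT_MAX)
 *			return -EINVAL;
 *		my_css(css)->weight = val;
 *		return 0;
 *	}
 *
 *	static struct cftype my_cftypes[] = {
 *		{
 *			.name = "weight",
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *			.read_u64 = my_weight_read,
 *			.write_u64 = my_weight_write,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 */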
/*
 * Control Group subsystem type.
 * See Documentation/cgroup-v1/cgroups.txt for details.
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task);
	void (*cancel_fork)(struct task_struct *task);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*free)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and a threaded
	 * controller should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/*
	 * If %false, this subsystem is properly hierarchical -
	 * configuration, resource accounting and restriction on a parent
	 * cgroup cover those of its children.  If %true, hierarchy support
	 * is broken in some ways - some subsystems ignore hierarchy
	 * completely while others are only implemented half-way.
	 *
	 * It's now disallowed to create nested cgroups if the subsystem is
	 * broken and cgroup core will emit a warning message in such
	 * cases.  Eventually, all subsystems will be made properly
	 * hierarchical and this will go away.
	 */
	bool broken_hierarchy:1;
	bool warned_broken_hierarchy:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems.  When such subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
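/*
 * Minimal controller sketch (an illustration under assumptions, not part
 * of this header): a subsystem usually only has to provide css_alloc() and
 * css_free() plus whichever other callbacks it cares about, and a
 * cgroup_subsys definition.  The my_* names are hypothetical; the real
 * subsystems are those listed in linux/cgroup_subsys.h with their
 * <name>_cgrp_subsys definitions.
 *
 *	static struct cgroup_subsys_state *
 *	my_css_alloc(struct cgroup_subsys_state *parent_css)
 *	{
 *		struct my_css *mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 *
 *		if (!mc)
 *			return ERR_PTR(-ENOMEM);
 *		return &mc->css;
 *	}
 *
 *	static void my_css_free(struct cgroup_subsys_state *css)
 *	{
 *		kfree(container_of(css, struct my_css, css));
 *	}
 *
 *	struct cgroup_subsys my_cgrp_subsys = {
 *		.css_alloc	= my_css_alloc,
 *		.css_free	= my_css_free,
 *		.dfl_cftypes	= my_cftypes,	// e.g. the array sketched above
 *		.legacy_cftypes	= my_cftypes,
 *	};
 */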
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
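/*
 * Usage sketch (illustrative only): paths that change a task's threadgroup
 * (most notably fork) bracket the change with the read side, while cgroup
 * core takes the write side of cgroup_threadgroup_rwsem for the duration
 * of a migration so it observes a stable set of threads:
 *
 *	cgroup_threadgroup_change_begin(current);
 *	// ... link the new task into its threadgroup ...
 *	cgroup_threadgroup_change_end(current);
 *
 * Both calls may sleep and must therefore be made from process context.
 */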
#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  A clear bit indicates the cgroup pointer while a set bit indicates
 * prioidx and classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bounded and highly unlikely to be high, this seems to be the
 * better trade-off.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8 is_data;
			u8 padding;
			u16 prioidx;
			u32 classid;
		} __packed;
#else
		struct {
			u32 classid;
			u16 prioidx;
			u8 padding;
			u8 is_data;
		} __packed;
#endif
		u64 val;
	};
};

/*
 * There's a theoretical window where the following accessors race with
 * updaters and return part of the previous pointer as the prioidx or
 * classid.  Such races are short-lived and the result isn't critical.
 */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
	/* fallback to 1 which is always the ID of the root cgroup */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	/* fallback to 0 which is the unconfigured default classid */
	return (skcd->is_data & 1) ? skcd->classid : 0;
}
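/*
 * A small usage sketch (illustrative only): the network layer reads the
 * overloaded word through the accessors above.  For example, when net_prio
 * is in use, the egress path can derive the priority index from a socket
 * with something like:
 *
 *	u16 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
 *
 * while the cgroup-pointer mode is consumed through sock_cgroup_ptr(),
 * declared in linux/cgroup.h, for cgroup2 based matching.
 */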
/*
 * If invoked concurrently, the updaters may clobber each other.  The
 * caller is responsible for synchronization.
 */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.prioidx = prioidx;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}

static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_classid(&skcd_buf) == classid)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.classid = classid;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */