/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
 * Copyright (C) 2006 Google, Inc
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * 2006 Rework by Paul Menage to use generic cgroups
 * 2008 Rework of the scheduler domains and CPU hotplug handling
 *      by Max Krasnyansky
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/*
 * There could be abnormal cpuset configurations for cpu or memory
 * node binding; add this key to provide a quick low-cost judgment
 * of the situation.
 */
DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

/*
 * Invalid partition error codes
 */
enum prs_errcode {
	PERR_NONE = 0,
	PERR_INVCPUS,
	PERR_INVPARENT,
	PERR_NOTPART,
	PERR_NOTEXCL,
	PERR_NOCPUS,
	PERR_HOTPLUG,
	PERR_CPUSEMPTY,
};

static const char * const perr_strings[] = {
	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus",
	[PERR_INVPARENT] = "Parent is an invalid partition root",
	[PERR_NOTPART]   = "Parent is not a partition root",
	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
	[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
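
	/*
	 * For illustration, with hypothetical masks on the default
	 * hierarchy:
	 *
	 *	parent's effective_cpus = 0-3
	 *	child's cpus_allowed    = 2-5	(user-configured)
	 *	child's effective_cpus  = 2-3	(the intersection)
	 *
	 * Writing "6-7" to the child's cpuset.cpus would make that
	 * intersection empty, so the child would inherit the parent's
	 * effective mask (0-3) instead.
	 */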

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * The old Memory Nodes that tasks in this cpuset took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have the tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus  - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;

	/*
	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
	 * know when to rebuild associated root domain bandwidth information.
	 */
	int nr_deadline_tasks;
	int nr_migrate_dl_tasks;
	u64 sum_migrate_dl_bw;

	/* Invalid partition error code, not lock protected */
	enum prs_errcode prs_err;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;
};

/*
 * Partition root states:
 *
 *   0 - member (not a partition root)
 *   1 - partition root
 *   2 - partition root without load balancing (isolated)
 *  -1 - invalid partition root
 *  -2 - invalid isolated partition root
 */
#define PRS_MEMBER		0
#define PRS_ROOT		1
#define PRS_ISOLATED		2
#define PRS_INVALID_ROOT	-1
#define PRS_INVALID_ISOLATED	-2

static inline bool is_prs_invalid(int prs_state)
{
	return prs_state < 0;
}
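
/*
 * For example, writing "root" to a cpuset's cpuset.cpus.partition file
 * moves it from PRS_MEMBER to PRS_ROOT, and writing "isolated" moves it
 * to PRS_ISOLATED.  A valid partition root that later fails one of the
 * partition constraints (e.g. after CPU hotplug) has its state negated,
 * PRS_ROOT becoming PRS_INVALID_ROOT, which is why is_prs_invalid() is
 * just a sign test.
 */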

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks++;
}

void dec_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks--;
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_valid(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static inline int is_partition_invalid(const struct cpuset *cs)
{
	return cs->partition_root_state < 0;
}

/*
 * Callers should hold callback_lock to modify partition_root_state.
 */
static inline void make_partition_invalid(struct cpuset *cs)
{
	if (is_partition_valid(cs))
		cs->partition_root_state = -cs->partition_root_state;
}

/*
 * Send a notification event whenever partition_root_state changes.
 */
static inline void notify_partition_change(struct cpuset *cs, int old_prs)
{
	if (old_prs == cs->partition_root_state)
		return;
	cgroup_file_notify(&cs->partition_file);

	/* Reset prs_err if not invalid */
	if (is_partition_valid(cs))
		WRITE_ONCE(cs->prs_err, PERR_NONE);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ROOT,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock.  We also require taking task_lock() when dereferencing a
 * task's cpuset pointer.  See "The task_lock() exception", at the end of
 * this comment.  The cpuset code uses only cpuset_mutex.  Other kernel
 * subsystems can use cpuset_lock()/cpuset_unlock() to prevent changes to
 * cpuset structures.  Note that cpuset_mutex needs to be a mutex as it is
 * used in paths that rely on priority inheritance (e.g. scheduler - on RT)
 * for correctness.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
 * also acquire callback_lock and be able to modify cpusets.  It can perform
 * various checks on the cpuset structure first, knowing nothing will change.
 * It can also allocate memory while just holding cpuset_mutex.  While it is
 * performing these checks, various callback routines can briefly acquire
 * callback_lock to query cpusets.  Once it is ready to make the changes, it
 * takes callback_lock, blocking everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);

void cpuset_lock(void)
{
	mutex_lock(&cpuset_mutex);
}

void cpuset_unlock(void)
{
	mutex_unlock(&cpuset_mutex);
}

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;
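
/*
 * A sketch of the nesting order used by the modifying paths below (see
 * e.g. rebuild_sched_domains() and update_cpumask()):
 *
 *	cpus_read_lock();
 *	mutex_lock(&cpuset_mutex);
 *	...				validate, allocate, compute masks
 *	spin_lock_irq(&callback_lock);
 *	...				publish the new masks
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 *	cpus_read_unlock();
 *
 * Readers that only query cpusets take just callback_lock (or RCU).
 */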

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

static inline void check_insane_mems_config(nodemask_t *nodes)
{
	if (!cpusets_insane_config() &&
	    movable_only_nodes(nodes)) {
		static_branch_enable(&cpusets_insane_config_key);
		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
			"Cpuset allocations might fail even with a lot of memory available.\n",
			nodemask_pr_args(nodes));
	}
}

/*
 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
 * With v2 behavior, "cpus" and "mems" are always what the users have
 * requested and won't be changed by hotplug events.  Only the effective
 * cpus or mems will be affected.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
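
/*
 * For example (mount point hypothetical), v2 behavior can be requested
 * on a v1 hierarchy with:
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 */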

/**
 * partition_is_populated - check if partition has tasks
 * @cs: partition root to be checked
 * @excluded_child: a child cpuset to be excluded in task checking
 * Return: true if there are tasks, false otherwise
 *
 * It is assumed that @cs is a valid partition root.  @excluded_child
 * should be non-NULL when this cpuset is going to become a partition
 * itself.
 */
static inline bool partition_is_populated(struct cpuset *cs,
					  struct cpuset *excluded_child)
{
	struct cgroup_subsys_state *css;
	struct cpuset *child;

	if (cs->css.cgroup->nr_populated_csets)
		return true;
	if (!excluded_child && !cs->nr_subparts_cpus)
		return cgroup_is_populated(cs->css.cgroup);

	rcu_read_lock();
	cpuset_for_each_child(child, css, cs) {
		if (child == excluded_child)
			continue;
		if (is_partition_valid(child))
			continue;
		if (cgroup_is_populated(child->css.cgroup)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
	return false;
}

/*
 * Return in pmask the portion of a task's cpuset's cpus_allowed that
 * are online and are capable of running the task.  If none are found,
 * walk up the cpuset hierarchy until we find one that does have some
 * appropriate cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct task_struct *tsk,
				  struct cpumask *pmask)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	struct cpuset *cs;

	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
		cpumask_copy(pmask, cpu_online_mask);

	rcu_read_lock();
	cs = task_cs(tsk);

	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			goto out_unlock;
		}
	}
	cpumask_and(pmask, pmask, cs->effective_cpus);

out_unlock:
	rcu_read_unlock();
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * Update a task's spread flags if the cpuset's page/slab spread flags
 * are set.
 *
 * Call with callback_lock or cpuset_mutex held.  The check can be skipped
 * if on default hierarchy.
 */
static void cpuset_update_task_spread_flags(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
		return;

	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}
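
/*
 * Usage sketch: alloc_cpumasks() serves either a cpuset or a tmpmasks
 * structure, never both at once.  For example:
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_cpumasks(NULL, &tmp))	   (fills new_cpus/addmask/delmask)
 *		return -ENOMEM;
 *	...
 *	free_cpumasks(NULL, &tmp);
 *
 * whereas alloc_cpumasks(cs, NULL) fills in cs->cpus_allowed,
 * cs->effective_cpus and cs->subparts_cpus, undone by free_cpuset().
 */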

/**
 * free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs: the cpuset whose cpumasks are to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change_legacy() - Validate conditions specific to legacy (v1)
 *			      behavior.
 */
static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	par = parent_cs(cur);
	if (par && !is_cpuset_subset(trial, par))
		goto out;

	ret = 0;
out:
	return ret;
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */
static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret = 0;

	rcu_read_lock();

	if (!is_in_v2_mode())
		ret = validate_change_legacy(cur, trial);
	if (ret)
		goto out;

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap.
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
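
/*
 * As an illustration of the exclusivity rule above, with hypothetical
 * siblings: if sibling A has cpuset.cpus "0-3" and cpu_exclusive set,
 * a request to set sibling B's cpuset.cpus to "3-5" fails
 * validate_change() with -EINVAL because the masks intersect on CPU 3,
 * while "4-5" would be accepted.
 */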

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of the full set.
 * The output of this function needs to be passed to the
 * kernel/sched/core.c partition_sched_domains() routine, which will
 * rebuild the scheduler's load balancing domains (sched domains) as
 * specified by that partial partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    cp - cpuset pointer, used (together with pos_css) to perform a
 *	   top-down scan of all cpusets.  For our purposes, rebuilding
 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 *	   balance cpusets.
 *   csa - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 *  doms - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and gives them the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of all
 *	cpusets having the same 'pn' value then forms the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
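
/*
 * A small worked example of the pn merging below, with three
 * hypothetical load-balanced cpusets:
 *
 *	csa[0]: cpus 0-1	pn = 0
 *	csa[1]: cpus 1-2	pn = 1
 *	csa[2]: cpus 4-5	pn = 2
 *
 * csa[0] and csa[1] overlap on CPU 1, so pn 1 is folded into pn 0 and
 * ndoms drops from 3 to 2, yielding two sched domains: {0-2} (the
 * union of csa[0] and csa[1]) and {4-5}.
 */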
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_TYPE_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_valid(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * the dattr==NULL case.  No need to abort if alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

static void dl_update_tasks_root_domain(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	if (cs->nr_deadline_tasks == 0)
		return;

	css_task_iter_start(&cs->css, 0, &it);

	while ((task = css_task_iter_next(&it)))
		dl_add_task_root_domain(task);

	css_task_iter_end(&it);
}

static void dl_rebuild_rd_accounting(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	lockdep_assert_held(&cpuset_mutex);
	lockdep_assert_cpus_held();
	lockdep_assert_held(&sched_domains_mutex);

	rcu_read_lock();

	/*
	 * Clear default root domain DL accounting, it will be computed again
	 * if a task belongs to it.
	 */
	dl_clear_root_domain(&def_root_domain);

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {

		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		css_get(&cs->css);

		rcu_read_unlock();

		dl_update_tasks_root_domain(cs);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
}

static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	dl_rebuild_rd_accounting();
	mutex_unlock(&sched_domains_mutex);
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held; cpus_read_lock() must be held as well.
 */
static void rebuild_sched_domains_locked(void)
{
	struct cgroup_subsys_state *pos_css;
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	struct cpuset *cs;
	int ndoms;

	lockdep_assert_cpus_held();
	lockdep_assert_held(&cpuset_mutex);

	/*
	 * If we have raced with CPU hotplug, return early to avoid
	 * passing doms with an offlined cpu to partition_sched_domains().
	 * Anyway, cpuset_hotplug_workfn() will rebuild sched domains.
	 *
	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
	 * should be the same as the active CPUs, so checking only top_cpuset
	 * is enough to detect racing CPU offlines.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	/*
	 * With subpartition CPUs, however, the effective CPUs of a partition
	 * root should be only a subset of the active CPUs.  Since a CPU in
	 * any partition root could be offlined, all must be checked.
	 */
	if (top_cpuset.nr_subparts_cpus) {
		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (!is_partition_valid(cs)) {
				pos_css = css_rightmost_descendant(pos_css);
				continue;
			}
			if (!cpumask_subset(cs->effective_cpus,
					    cpu_active_mask)) {
				rcu_read_unlock();
				return;
			}
		}
		rcu_read_unlock();
	}

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	cpus_read_lock();
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
	cpus_read_unlock();
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @new_cpus: the temp variable for the new effective_cpus mask
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.  For top_cpuset, task_cpu_possible_mask()
 * is used instead of effective_cpus to make sure all offline CPUs are also
 * included, as hotplug code won't update cpumasks for tasks in top_cpuset.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
{
	struct css_task_iter it;
	struct task_struct *task;
	bool top_cs = cs == &top_cpuset;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		const struct cpumask *possible_mask = task_cpu_possible_mask(task);

		if (top_cs) {
			/*
			 * Percpu kthreads in top_cpuset are ignored
			 */
			if ((task->flags & PF_KTHREAD) && kthread_is_per_cpu(task))
				continue;
			cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
		} else {
			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
		}
		set_cpus_allowed_ptr(task, new_cpus);
	}
	css_task_iter_end(&it);
}

/**
 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
 * @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs to recompute the new effective_cpus mask
 * @parent: the parent cpuset
 *
 * If the parent has subpartition CPUs, include them in the list of
 * allowable CPUs in computing the new effective_cpus mask.  Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}
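
/*
 * For example, with hypothetical masks:
 *
 *	parent->effective_cpus = 0-1, parent->subparts_cpus = 2-3,
 *	cs->cpus_allowed = 1-3, cpu_active_mask = 0-2 (CPU 3 offline)
 *
 * the parent is treated as having granted 0-3, and the effective mask
 * computed above is (0-3) & (1-3) & (0-2) = 1-2.
 */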

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
	partcmd_invalidate,	/* Make partition invalid */
};

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
		       int turning_on);
/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cs:      The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0 or a partition root state error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root.  The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus.  The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted, or an error code otherwise.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root.  Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus.  0 will always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu list is
 * to be changed from cpus_allowed to newmask.  Otherwise, cpus_allowed is
 * assumed to remain the same.  The cpuset should either be a valid or
 * invalid partition root.  The partition root state may change from valid
 * to invalid or vice versa.  An error code will only be returned if
 * transitioning from invalid to valid violates the exclusivity rule.
 *
 * For partcmd_invalidate, the current partition will be made invalid.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate().  An error code may be returned and the caller will
 * check for error.
 *
 * The partcmd_update command is used by update_cpumasks_hier() with newmask
 * NULL and update_cpumask() with newmask set.  The partcmd_invalidate is
 * used by update_cpumask() with NULL newmask.  In both cases, the callers
 * won't check for error and so partition_root_state and prs_err will be
 * updated directly.
 */
static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cs);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	int old_prs, new_prs;
	int part_error = PERR_NONE;	/* Partition error? */

	lockdep_assert_held(&cpuset_mutex);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_valid(parent)) {
		return is_partition_invalid(parent)
		       ? PERR_INVPARENT : PERR_NOTPART;
	}
	if ((newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cs->cpus_allowed)))
		return PERR_CPUSEMPTY;

	/*
	 * new_prs will only be changed for the partcmd_update and
	 * partcmd_invalidate commands.
	 */
	adding = deleting = false;
	old_prs = new_prs = cs->partition_root_state;
	if (cmd == partcmd_enable) {
		/*
		 * Enabling partition root is not allowed if cpus_allowed
		 * doesn't overlap parent's cpus_allowed.
		 */
		if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
			return PERR_INVCPUS;

		/*
		 * A parent can be left with no CPU as long as there is no
		 * task directly associated with the parent partition.
		 */
		if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
		    partition_is_populated(parent, cs))
			return PERR_NOCPUS;

		cpumask_copy(tmp->addmask, cs->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		/*
		 * Need to remove cpus from parent's subparts_cpus for valid
		 * partition root.
		 */
		deleting = !is_prs_invalid(old_prs) &&
			   cpumask_and(tmp->delmask, cs->cpus_allowed,
				       parent->subparts_cpus);
	} else if (cmd == partcmd_invalidate) {
		if (is_prs_invalid(old_prs))
			return 0;

		/*
		 * Make the current partition invalid.  It is assumed that
		 * invalidation is caused by violating the cpu exclusivity
		 * rule.
		 */
		deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
				       parent->subparts_cpus);
		if (old_prs > 0) {
			new_prs = -old_prs;
			part_error = PERR_NOTEXCL;
		}
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * Compute add/delete mask to/from subparts_cpus
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->cpus_allowed
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Make partition invalid if parent's effective_cpus could
		 * become empty and there are tasks in the parent.
		 */
		if (adding &&
		    cpumask_subset(parent->effective_cpus, tmp->addmask) &&
		    !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
		    partition_is_populated(parent, cs)) {
			part_error = PERR_NOCPUS;
			adding = false;
			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
					       parent->subparts_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * delmask = cpus_allowed & parent->subparts_cpus
		 * addmask = cpus_allowed & parent->cpus_allowed
		 *			  & ~parent->subparts_cpus
		 *
		 * This gets invoked either due to a hotplug event or from
		 * update_cpumasks_hier().  This can cause the state of a
		 * partition root to transition from valid to invalid or vice
		 * versa.  So we still need to compute the addmask and delmask.
		 *
		 * A partition error happens when:
		 * 1) Cpuset is valid partition, but parent does not distribute
		 *    out any CPUs.
		 * 2) Parent has tasks and all its effective CPUs will have
		 *    to be distributed out.
		 */
		cpumask_and(tmp->addmask, cs->cpus_allowed,
			    parent->cpus_allowed);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);

		if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
		    (adding &&
		     cpumask_subset(parent->effective_cpus, tmp->addmask) &&
		     partition_is_populated(parent, cs))) {
			part_error = PERR_NOCPUS;
			adding = false;
		}

		if (part_error && is_partition_valid(cs) &&
		    parent->nr_subparts_cpus)
			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
					       parent->subparts_cpus);
	}
	if (part_error)
		WRITE_ONCE(cs->prs_err, part_error);

	if (cmd == partcmd_update) {
		/*
		 * Check for possible transitions between valid and invalid
		 * partition root.
		 */
		switch (cs->partition_root_state) {
		case PRS_ROOT:
		case PRS_ISOLATED:
			if (part_error)
				new_prs = -old_prs;
			break;
		case PRS_INVALID_ROOT:
		case PRS_INVALID_ISOLATED:
			if (!part_error)
				new_prs = -old_prs;
			break;
		}
	}

	if (!adding && !deleting && (new_prs == old_prs))
		return 0;

	/*
	 * Transitioning from invalid to valid or vice versa may require
	 * changing CS_CPU_EXCLUSIVE and CS_SCHED_LOAD_BALANCE.
	 */
	if (old_prs != new_prs) {
		if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) &&
		    (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0))
			return PERR_NOTEXCL;
		if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs))
			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
	}

	/*
	 * Change the parent's subparts_cpus.
	 * Newly added CPUs will be removed from effective_cpus and
	 * newly deleted ones will be added back to effective_cpus.
	 */
	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		/*
		 * Some of the CPUs in subparts_cpus might have been offlined.
		 */
		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}

	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);

	if (old_prs != new_prs)
		cs->partition_root_state = new_prs;

	spin_unlock_irq(&callback_lock);

	if (adding || deleting)
		update_tasks_cpumask(parent, tmp->addmask);

	/*
	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if
	 * necessary.  rebuild_sched_domains_locked() may be called.
	 */
	if (old_prs != new_prs) {
		if (old_prs == PRS_ISOLATED)
			update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
		else if (new_prs == PRS_ISOLATED)
			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
	}
	notify_partition_change(cs, old_prs);
	return 0;
}

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs:    the cpuset to consider
 * @tmp:   temp variables for calculating effective_cpus & partition setup
 * @force: don't skip any descendant cpusets if set
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held.
 */
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
				 bool force)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;
	int old_prs, new_prs;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);
		bool update_parent = false;

		compute_effective_cpumask(tmp->new_cpus, cp, parent);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs unless
		 * it is a partition root that has explicitly distributed
		 * out all its CPUs.
		 */
		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
			if (is_partition_valid(cp) &&
			    cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
				goto update_parent_subparts;

			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
			if (!cp->use_parent_ecpus) {
				cp->use_parent_ecpus = true;
				parent->child_ecpus_count++;
			}
		} else if (cp->use_parent_ecpus) {
			cp->use_parent_ecpus = false;
			WARN_ON_ONCE(!parent->child_ecpus_count);
			parent->child_ecpus_count--;
		}

		/*
		 * Skip the whole subtree if the cpumask remains the same,
		 * there is no partition root state and the force flag is
		 * not set.
		 */
		if (!cp->partition_root_state && !force &&
		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

update_parent_subparts:
		/*
		 * update_parent_subparts_cpumask() should have been called
		 * for cs already in update_cpumask().  We should also call
		 * update_tasks_cpumask() again for tasks in the parent
		 * cpuset if the parent's subparts_cpus changes.
		 */
		old_prs = new_prs = cp->partition_root_state;
		if ((cp != cs) && old_prs) {
			switch (parent->partition_root_state) {
			case PRS_ROOT:
			case PRS_ISOLATED:
				update_parent = true;
				break;

			default:
				/*
				 * When parent is not a partition root or is
				 * invalid, child partition roots become
				 * invalid too.
				 */
				if (is_partition_valid(cp))
					new_prs = -cp->partition_root_state;
				WRITE_ONCE(cp->prs_err,
					   is_partition_invalid(parent)
					   ? PERR_INVPARENT : PERR_NOTPART);
				break;
			}
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		if (update_parent) {
			update_parent_subparts_cpumask(cp, partcmd_update, NULL,
						       tmp);
			/*
			 * The cpuset partition_root_state may become
			 * invalid.  Capture it.
			 */
			new_prs = cp->partition_root_state;
		}

		spin_lock_irq(&callback_lock);

		if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
			/*
			 * Put all active subparts_cpus back to effective_cpus.
			 */
			cpumask_or(tmp->new_cpus, tmp->new_cpus,
				   cp->subparts_cpus);
			cpumask_and(tmp->new_cpus, tmp->new_cpus,
				    cpu_active_mask);
			cp->nr_subparts_cpus = 0;
			cpumask_clear(cp->subparts_cpus);
		}

		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
		if (cp->nr_subparts_cpus) {
			/*
			 * Make sure that effective_cpus & subparts_cpus
			 * are mutually exclusive.
			 */
			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
				       cp->subparts_cpus);
		}

		cp->partition_root_state = new_prs;
		spin_unlock_irq(&callback_lock);

		notify_partition_change(cp, old_prs);

		WARN_ON(!is_in_v2_mode() &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp, tmp->new_cpus);

		/*
		 * On legacy hierarchy, if the effective cpumask of any non-
		 * empty cpuset is changed, we need to rebuild sched domains.
		 * On default hierarchy, the cpuset needs to be a partition
		 * root as well.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp) &&
		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
		    is_partition_valid(cp)))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_sibling_cpumasks - Update sibling cpusets' cpumasks
 * @parent: Parent cpuset
 * @cs:     Current cpuset
 * @tmp:    Temp variables
 */
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
				    struct tmpmasks *tmp)
{
	struct cpuset *sibling;
	struct cgroup_subsys_state *pos_css;

	lockdep_assert_held(&cpuset_mutex);

	/*
	 * Check all its siblings and call update_cpumasks_hier()
	 * if their use_parent_ecpus flag is set in order for them
	 * to use the right effective_cpus value.
	 *
	 * The update_cpumasks_hier() function may sleep.  So we have to
	 * release the RCU read lock before calling it.
	 */
	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;
		if (!css_tryget_online(&sibling->css))
			continue;

		rcu_read_unlock();
		update_cpumasks_hier(sibling, tmp, false);
		rcu_read_lock();
		css_put(&sibling->css);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs:      the cpuset to consider
 * @trialcs: trial cpuset
 * @buf:     buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;
	struct tmpmasks tmp;
	bool invalidate = false;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/*
	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
	 * to allocated cpumasks.
	 *
	 * Note that update_parent_subparts_cpumask() uses only addmask &
	 * delmask, but not new_cpus.
	 */
	tmp.addmask  = trialcs->subparts_cpus;
	tmp.delmask  = trialcs->effective_cpus;
	tmp.new_cpus = NULL;
#endif

	retval = validate_change(cs, trialcs);

	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
		struct cpuset *cp, *parent;
		struct cgroup_subsys_state *css;

		/*
		 * The -EINVAL error code indicates that the partition sibling
		 * CPU exclusivity rule has been violated.  We still allow
		 * the cpumask change to proceed while invalidating the
		 * partition.  However, any conflicting sibling partitions
		 * have to be marked as invalid too.
		 */
		invalidate = true;
		rcu_read_lock();
		parent = parent_cs(cs);
		cpuset_for_each_child(cp, css, parent)
			if (is_partition_valid(cp) &&
			    cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
				rcu_read_unlock();
				update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
				rcu_read_lock();
			}
		rcu_read_unlock();
		retval = 0;
	}
	if (retval < 0)
		return retval;

	if (cs->partition_root_state) {
		if (invalidate)
			update_parent_subparts_cpumask(cs, partcmd_invalidate,
						       NULL, &tmp);
		else
			update_parent_subparts_cpumask(cs, partcmd_update,
						       trialcs->cpus_allowed, &tmp);
	}

	compute_effective_cpumask(trialcs->effective_cpus, trialcs,
				  parent_cs(cs));
	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

	/*
	 * Make sure that subparts_cpus, if not empty, is a subset of
	 * cpus_allowed.  Clear subparts_cpus if the partition is not valid
	 * or if the effective cpus would become empty with tasks present.
	 */
	if (cs->nr_subparts_cpus) {
		if (!is_partition_valid(cs) ||
		   (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
		    partition_is_populated(cs, NULL))) {
			cs->nr_subparts_cpus = 0;
			cpumask_clear(cs->subparts_cpus);
		} else {
			cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
				    cs->cpus_allowed);
			cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
		}
	}
	spin_unlock_irq(&callback_lock);

#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Now trialcs->cpus_allowed is available */
	tmp.new_cpus = trialcs->cpus_allowed;
#endif

	/* effective_cpus will be updated here */
	update_cpumasks_hier(cs, &tmp, false);

	if (cs->partition_root_state) {
		struct cpuset *parent = parent_cs(cs);

		/*
		 * For partition root, update the cpumasks of sibling
		 * cpusets if they use parent's effective_cpus.
		 */
		if (parent->child_ecpus_count)
			update_sibling_cpumasks(parent, cs, &tmp);
	}
	return 0;
}
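
/*
 * For example (cgroup path hypothetical), update_cpumask() is what
 * ultimately services:
 *
 *	echo 0-3 > /sys/fs/cgroup/cs1/cpuset.cpus
 *
 * and a write of an empty string clears cpus_allowed, which is only
 * accepted while the cpuset has no tasks.
 */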
1879 */ 1880 1881 struct cpuset_migrate_mm_work { 1882 struct work_struct work; 1883 struct mm_struct *mm; 1884 nodemask_t from; 1885 nodemask_t to; 1886 }; 1887 1888 static void cpuset_migrate_mm_workfn(struct work_struct *work) 1889 { 1890 struct cpuset_migrate_mm_work *mwork = 1891 container_of(work, struct cpuset_migrate_mm_work, work); 1892 1893 /* on a wq worker, no need to worry about %current's mems_allowed */ 1894 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); 1895 mmput(mwork->mm); 1896 kfree(mwork); 1897 } 1898 1899 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 1900 const nodemask_t *to) 1901 { 1902 struct cpuset_migrate_mm_work *mwork; 1903 1904 if (nodes_equal(*from, *to)) { 1905 mmput(mm); 1906 return; 1907 } 1908 1909 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); 1910 if (mwork) { 1911 mwork->mm = mm; 1912 mwork->from = *from; 1913 mwork->to = *to; 1914 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); 1915 queue_work(cpuset_migrate_mm_wq, &mwork->work); 1916 } else { 1917 mmput(mm); 1918 } 1919 } 1920 1921 static void cpuset_post_attach(void) 1922 { 1923 flush_workqueue(cpuset_migrate_mm_wq); 1924 } 1925 1926 /* 1927 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy 1928 * @tsk: the task to change 1929 * @newmems: new nodes that the task will be set 1930 * 1931 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed 1932 * and rebind an eventual tasks' mempolicy. If the task is allocating in 1933 * parallel, it might temporarily see an empty intersection, which results in 1934 * a seqlock check and retry before OOM or allocation failure. 1935 */ 1936 static void cpuset_change_task_nodemask(struct task_struct *tsk, 1937 nodemask_t *newmems) 1938 { 1939 task_lock(tsk); 1940 1941 local_irq_disable(); 1942 write_seqcount_begin(&tsk->mems_allowed_seq); 1943 1944 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 1945 mpol_rebind_task(tsk, newmems); 1946 tsk->mems_allowed = *newmems; 1947 1948 write_seqcount_end(&tsk->mems_allowed_seq); 1949 local_irq_enable(); 1950 1951 task_unlock(tsk); 1952 } 1953 1954 static void *cpuset_being_rebound; 1955 1956 /** 1957 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. 1958 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed 1959 * 1960 * Iterate through each task of @cs updating its mems_allowed to the 1961 * effective cpuset's. As this function is called with cpuset_mutex held, 1962 * cpuset membership stays stable. 1963 */ 1964 static void update_tasks_nodemask(struct cpuset *cs) 1965 { 1966 static nodemask_t newmems; /* protected by cpuset_mutex */ 1967 struct css_task_iter it; 1968 struct task_struct *task; 1969 1970 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 1971 1972 guarantee_online_mems(cs, &newmems); 1973 1974 /* 1975 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't 1976 * take while holding tasklist_lock. Forks can happen - the 1977 * mpol_dup() cpuset_being_rebound check will catch such forks, 1978 * and rebind their vma mempolicies too. Because we still hold 1979 * the global cpuset_mutex, we know that no other rebind effort 1980 * will be contending for the global variable cpuset_being_rebound. 1981 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 1982 * is idempotent. Also migrate pages in each mm to new nodes. 
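 *
 * For reference, the fork-side check lives in mm/mempolicy.c's
 * __mpol_dup(); roughly (a sketch, not verbatim):
 *
 *	if (current_cpuset_is_being_rebound()) {
 *		nodemask_t mems = cpuset_mems_allowed(current);
 *		mpol_rebind_policy(new, &mems);
 *	}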
1983 */
1984 css_task_iter_start(&cs->css, 0, &it);
1985 while ((task = css_task_iter_next(&it))) {
1986 struct mm_struct *mm;
1987 bool migrate;
1988
1989 cpuset_change_task_nodemask(task, &newmems);
1990
1991 mm = get_task_mm(task);
1992 if (!mm)
1993 continue;
1994
1995 migrate = is_memory_migrate(cs);
1996
1997 mpol_rebind_mm(mm, &cs->mems_allowed);
1998 if (migrate)
1999 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2000 else
2001 mmput(mm);
2002 }
2003 css_task_iter_end(&it);
2004
2005 /*
2006 * All the tasks' nodemasks have been updated, update
2007 * cs->old_mems_allowed.
2008 */
2009 cs->old_mems_allowed = newmems;
2010
2011 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2012 cpuset_being_rebound = NULL;
2013 }
2014
2015 /*
2016 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2017 * @cs: the cpuset to consider
2018 * @new_mems: a temp variable for calculating new effective_mems
2019 *
2020 * When the configured nodemask is changed, the effective nodemasks of this
2021 * cpuset and all its descendants need to be updated.
2022 *
2023 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2024 *
2025 * Called with cpuset_mutex held.
2026 */
2027 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2028 {
2029 struct cpuset *cp;
2030 struct cgroup_subsys_state *pos_css;
2031
2032 rcu_read_lock();
2033 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2034 struct cpuset *parent = parent_cs(cp);
2035
2036 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2037
2038 /*
2039 * If it becomes empty, inherit the effective mask of the
2040 * parent, which is guaranteed to have some MEMs.
2041 */
2042 if (is_in_v2_mode() && nodes_empty(*new_mems))
2043 *new_mems = parent->effective_mems;
2044
2045 /* Skip the whole subtree if the nodemask remains the same. */
2046 if (nodes_equal(*new_mems, cp->effective_mems)) {
2047 pos_css = css_rightmost_descendant(pos_css);
2048 continue;
2049 }
2050
2051 if (!css_tryget_online(&cp->css))
2052 continue;
2053 rcu_read_unlock();
2054
2055 spin_lock_irq(&callback_lock);
2056 cp->effective_mems = *new_mems;
2057 spin_unlock_irq(&callback_lock);
2058
2059 WARN_ON(!is_in_v2_mode() &&
2060 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2061
2062 update_tasks_nodemask(cp);
2063
2064 rcu_read_lock();
2065 css_put(&cp->css);
2066 }
2067 rcu_read_unlock();
2068 }
2069
2070 /*
2071 * Handle a user request to change the 'mems' memory placement
2072 * of a cpuset. Needs to validate the request, update the
2073 * cpuset's mems_allowed, and for each task in the cpuset,
2074 * update mems_allowed, rebind the task's mempolicy and any vma
2075 * mempolicies, and if the cpuset is marked 'memory_migrate',
2076 * migrate the task's pages to the new memory.
2077 *
2078 * Call with cpuset_mutex held. May take callback_lock during call.
2079 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2080 * lock each such task's mm->mmap_lock, scan its vma's and rebind
2081 * their mempolicies to the cpuset's new mems_allowed.
2082 */
2083 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2084 const char *buf)
2085 {
2086 int retval;
2087
2088 /*
2089 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2090 * it's read-only
2091 */
2092 if (cs == &top_cpuset) {
2093 retval = -EACCES;
2094 goto done;
2095 }
2096
2097 /*
2098 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2099 * Since nodelist_parse() fails on an empty mask, we special case 2100 * that parsing. The validate_change() call ensures that cpusets 2101 * with tasks have memory. 2102 */ 2103 if (!*buf) { 2104 nodes_clear(trialcs->mems_allowed); 2105 } else { 2106 retval = nodelist_parse(buf, trialcs->mems_allowed); 2107 if (retval < 0) 2108 goto done; 2109 2110 if (!nodes_subset(trialcs->mems_allowed, 2111 top_cpuset.mems_allowed)) { 2112 retval = -EINVAL; 2113 goto done; 2114 } 2115 } 2116 2117 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { 2118 retval = 0; /* Too easy - nothing to do */ 2119 goto done; 2120 } 2121 retval = validate_change(cs, trialcs); 2122 if (retval < 0) 2123 goto done; 2124 2125 check_insane_mems_config(&trialcs->mems_allowed); 2126 2127 spin_lock_irq(&callback_lock); 2128 cs->mems_allowed = trialcs->mems_allowed; 2129 spin_unlock_irq(&callback_lock); 2130 2131 /* use trialcs->mems_allowed as a temp variable */ 2132 update_nodemasks_hier(cs, &trialcs->mems_allowed); 2133 done: 2134 return retval; 2135 } 2136 2137 bool current_cpuset_is_being_rebound(void) 2138 { 2139 bool ret; 2140 2141 rcu_read_lock(); 2142 ret = task_cs(current) == cpuset_being_rebound; 2143 rcu_read_unlock(); 2144 2145 return ret; 2146 } 2147 2148 static int update_relax_domain_level(struct cpuset *cs, s64 val) 2149 { 2150 #ifdef CONFIG_SMP 2151 if (val < -1 || val >= sched_domain_level_max) 2152 return -EINVAL; 2153 #endif 2154 2155 if (val != cs->relax_domain_level) { 2156 cs->relax_domain_level = val; 2157 if (!cpumask_empty(cs->cpus_allowed) && 2158 is_sched_load_balance(cs)) 2159 rebuild_sched_domains_locked(); 2160 } 2161 2162 return 0; 2163 } 2164 2165 /** 2166 * update_tasks_flags - update the spread flags of tasks in the cpuset. 2167 * @cs: the cpuset in which each task's spread flags needs to be changed 2168 * 2169 * Iterate through each task of @cs updating its spread flags. As this 2170 * function is called with cpuset_mutex held, cpuset membership stays 2171 * stable. 2172 */ 2173 static void update_tasks_flags(struct cpuset *cs) 2174 { 2175 struct css_task_iter it; 2176 struct task_struct *task; 2177 2178 css_task_iter_start(&cs->css, 0, &it); 2179 while ((task = css_task_iter_next(&it))) 2180 cpuset_update_task_spread_flags(cs, task); 2181 css_task_iter_end(&it); 2182 } 2183 2184 /* 2185 * update_flag - read a 0 or a 1 in a file and update associated flag 2186 * bit: the bit to update (see cpuset_flagbits_t) 2187 * cs: the cpuset to update 2188 * turning_on: whether the flag is being set or cleared 2189 * 2190 * Call with cpuset_mutex held. 
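 *
 * E.g. update_prstate() below calls update_flag(CS_SCHED_LOAD_BALANCE, cs, 0)
 * to turn load balancing off when a partition root becomes "isolated".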
2191 */ 2192 2193 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 2194 int turning_on) 2195 { 2196 struct cpuset *trialcs; 2197 int balance_flag_changed; 2198 int spread_flag_changed; 2199 int err; 2200 2201 trialcs = alloc_trial_cpuset(cs); 2202 if (!trialcs) 2203 return -ENOMEM; 2204 2205 if (turning_on) 2206 set_bit(bit, &trialcs->flags); 2207 else 2208 clear_bit(bit, &trialcs->flags); 2209 2210 err = validate_change(cs, trialcs); 2211 if (err < 0) 2212 goto out; 2213 2214 balance_flag_changed = (is_sched_load_balance(cs) != 2215 is_sched_load_balance(trialcs)); 2216 2217 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 2218 || (is_spread_page(cs) != is_spread_page(trialcs))); 2219 2220 spin_lock_irq(&callback_lock); 2221 cs->flags = trialcs->flags; 2222 spin_unlock_irq(&callback_lock); 2223 2224 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 2225 rebuild_sched_domains_locked(); 2226 2227 if (spread_flag_changed) 2228 update_tasks_flags(cs); 2229 out: 2230 free_cpuset(trialcs); 2231 return err; 2232 } 2233 2234 /** 2235 * update_prstate - update partition_root_state 2236 * @cs: the cpuset to update 2237 * @new_prs: new partition root state 2238 * Return: 0 if successful, != 0 if error 2239 * 2240 * Call with cpuset_mutex held. 2241 */ 2242 static int update_prstate(struct cpuset *cs, int new_prs) 2243 { 2244 int err = PERR_NONE, old_prs = cs->partition_root_state; 2245 bool sched_domain_rebuilt = false; 2246 struct cpuset *parent = parent_cs(cs); 2247 struct tmpmasks tmpmask; 2248 2249 if (old_prs == new_prs) 2250 return 0; 2251 2252 /* 2253 * For a previously invalid partition root, leave it at being 2254 * invalid if new_prs is not "member". 2255 */ 2256 if (new_prs && is_prs_invalid(old_prs)) { 2257 cs->partition_root_state = -new_prs; 2258 return 0; 2259 } 2260 2261 if (alloc_cpumasks(NULL, &tmpmask)) 2262 return -ENOMEM; 2263 2264 if (!old_prs) { 2265 /* 2266 * Turning on partition root requires setting the 2267 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed 2268 * cannot be empty. 2269 */ 2270 if (cpumask_empty(cs->cpus_allowed)) { 2271 err = PERR_CPUSEMPTY; 2272 goto out; 2273 } 2274 2275 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); 2276 if (err) { 2277 err = PERR_NOTEXCL; 2278 goto out; 2279 } 2280 2281 err = update_parent_subparts_cpumask(cs, partcmd_enable, 2282 NULL, &tmpmask); 2283 if (err) { 2284 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 2285 goto out; 2286 } 2287 2288 if (new_prs == PRS_ISOLATED) { 2289 /* 2290 * Disable the load balance flag should not return an 2291 * error unless the system is running out of memory. 2292 */ 2293 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 2294 sched_domain_rebuilt = true; 2295 } 2296 } else if (old_prs && new_prs) { 2297 /* 2298 * A change in load balance state only, no change in cpumasks. 2299 */ 2300 update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED)); 2301 sched_domain_rebuilt = true; 2302 goto out; /* Sched domain is rebuilt in update_flag() */ 2303 } else { 2304 /* 2305 * Switching back to member is always allowed even if it 2306 * disables child partitions. 2307 */ 2308 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, 2309 &tmpmask); 2310 2311 /* 2312 * If there are child partitions, they will all become invalid. 
2313 */
2314 if (unlikely(cs->nr_subparts_cpus)) {
2315 spin_lock_irq(&callback_lock);
2316 cs->nr_subparts_cpus = 0;
2317 cpumask_clear(cs->subparts_cpus);
2318 compute_effective_cpumask(cs->effective_cpus, cs, parent);
2319 spin_unlock_irq(&callback_lock);
2320 }
2321
2322 /* Turning off CS_CPU_EXCLUSIVE will not return error */
2323 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2324
2325 if (!is_sched_load_balance(cs)) {
2326 /* Make sure load balance is on */
2327 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
2328 sched_domain_rebuilt = true;
2329 }
2330 }
2331
2332 update_tasks_cpumask(parent, tmpmask.new_cpus);
2333
2334 if (parent->child_ecpus_count)
2335 update_sibling_cpumasks(parent, cs, &tmpmask);
2336
2337 if (!sched_domain_rebuilt)
2338 rebuild_sched_domains_locked();
2339 out:
2340 /*
2341 * Make the partition invalid if an error happens
2342 */
2343 if (err)
2344 new_prs = -new_prs;
2345 spin_lock_irq(&callback_lock);
2346 cs->partition_root_state = new_prs;
2347 WRITE_ONCE(cs->prs_err, err);
2348 spin_unlock_irq(&callback_lock);
2349 /*
2350 * Update child cpusets, if present.
2351 * Force update if switching back to member.
2352 */
2353 if (!list_empty(&cs->css.children))
2354 update_cpumasks_hier(cs, &tmpmask, !new_prs);
2355
2356 notify_partition_change(cs, old_prs);
2357 free_cpumasks(NULL, &tmpmask);
2358 return 0;
2359 }
2360
2361 /*
2362 * Frequency meter - How fast is some event occurring?
2363 *
2364 * These routines manage a digitally filtered, constant time based,
2365 * event frequency meter. There are four routines:
2366 * fmeter_init() - initialize a frequency meter.
2367 * fmeter_markevent() - called each time the event happens.
2368 * fmeter_getrate() - returns the recent rate of such events.
2369 * fmeter_update() - internal routine used to update fmeter.
2370 *
2371 * A common data structure is passed to each of these routines,
2372 * which is used to keep track of the state required to manage the
2373 * frequency meter and its digital filter.
2374 *
2375 * The filter works on the number of events marked per unit time.
2376 * The filter is single-pole low-pass recursive (IIR). The time unit
2377 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2378 * simulate 3 decimal digits of precision (multiplied by 1000).
2379 *
2380 * With an FM_COEF of 933, and a time base of 1 second, the filter
2381 * has a half-life of 10 seconds, meaning that if the events quit
2382 * happening, then the rate returned from fmeter_getrate()
2383 * will be cut in half every 10 seconds, until it converges to zero.
2384 *
2385 * It is not worth doing a real infinitely recursive filter. If more
2386 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2387 * just compute FM_MAXTICKS ticks worth, by which point the level
2388 * will be stable.
2389 *
2390 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2391 * arithmetic overflow in the fmeter_update() routine.
2392 *
2393 * Given the simple 32 bit integer arithmetic used, this meter works
2394 * best for reporting rates between one per millisecond (msec) and
2395 * one per 32 (approx) seconds. At constant rates faster than one
2396 * per msec it maxes out at values just under 1,000,000. At constant
2397 * rates between one per msec and one per second it will stabilize
2398 * to a value N*1000, where N is the rate of events per second.
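 *
 * Worked example, using the FM_COEF (933) and FM_SCALE (1000) values
 * defined below: each elapsed second decays val to val * 933 / 1000,
 * and 0.933^10 is roughly 0.5, which is where the 10 second half-life
 * comes from. A steady 5 events/sec converges on val == 5000.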
2399 * At constant rates between one per second and one per 32 seconds, 2400 * it will be choppy, moving up on the seconds that have an event, 2401 * and then decaying until the next event. At rates slower than 2402 * about one in 32 seconds, it decays all the way back to zero between 2403 * each event. 2404 */ 2405 2406 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 2407 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 2408 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 2409 #define FM_SCALE 1000 /* faux fixed point scale */ 2410 2411 /* Initialize a frequency meter */ 2412 static void fmeter_init(struct fmeter *fmp) 2413 { 2414 fmp->cnt = 0; 2415 fmp->val = 0; 2416 fmp->time = 0; 2417 spin_lock_init(&fmp->lock); 2418 } 2419 2420 /* Internal meter update - process cnt events and update value */ 2421 static void fmeter_update(struct fmeter *fmp) 2422 { 2423 time64_t now; 2424 u32 ticks; 2425 2426 now = ktime_get_seconds(); 2427 ticks = now - fmp->time; 2428 2429 if (ticks == 0) 2430 return; 2431 2432 ticks = min(FM_MAXTICKS, ticks); 2433 while (ticks-- > 0) 2434 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 2435 fmp->time = now; 2436 2437 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 2438 fmp->cnt = 0; 2439 } 2440 2441 /* Process any previous ticks, then bump cnt by one (times scale). */ 2442 static void fmeter_markevent(struct fmeter *fmp) 2443 { 2444 spin_lock(&fmp->lock); 2445 fmeter_update(fmp); 2446 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 2447 spin_unlock(&fmp->lock); 2448 } 2449 2450 /* Process any previous ticks, then return current value. */ 2451 static int fmeter_getrate(struct fmeter *fmp) 2452 { 2453 int val; 2454 2455 spin_lock(&fmp->lock); 2456 fmeter_update(fmp); 2457 val = fmp->val; 2458 spin_unlock(&fmp->lock); 2459 return val; 2460 } 2461 2462 static struct cpuset *cpuset_attach_old_cs; 2463 2464 /* 2465 * Check to see if a cpuset can accept a new task 2466 * For v1, cpus_allowed and mems_allowed can't be empty. 2467 * For v2, effective_cpus can't be empty. 2468 * Note that in v1, effective_cpus = cpus_allowed. 
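 *
 * E.g. on v1, attaching to a cpuset with cpus "0-1" but an empty mems
 * fails with -ENOSPC; on v2 only an empty effective_cpus rejects it.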
2469 */ 2470 static int cpuset_can_attach_check(struct cpuset *cs) 2471 { 2472 if (cpumask_empty(cs->effective_cpus) || 2473 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) 2474 return -ENOSPC; 2475 return 0; 2476 } 2477 2478 static void reset_migrate_dl_data(struct cpuset *cs) 2479 { 2480 cs->nr_migrate_dl_tasks = 0; 2481 cs->sum_migrate_dl_bw = 0; 2482 } 2483 2484 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 2485 static int cpuset_can_attach(struct cgroup_taskset *tset) 2486 { 2487 struct cgroup_subsys_state *css; 2488 struct cpuset *cs, *oldcs; 2489 struct task_struct *task; 2490 int ret; 2491 2492 /* used later by cpuset_attach() */ 2493 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 2494 oldcs = cpuset_attach_old_cs; 2495 cs = css_cs(css); 2496 2497 mutex_lock(&cpuset_mutex); 2498 2499 /* Check to see if task is allowed in the cpuset */ 2500 ret = cpuset_can_attach_check(cs); 2501 if (ret) 2502 goto out_unlock; 2503 2504 cgroup_taskset_for_each(task, css, tset) { 2505 ret = task_can_attach(task); 2506 if (ret) 2507 goto out_unlock; 2508 ret = security_task_setscheduler(task); 2509 if (ret) 2510 goto out_unlock; 2511 2512 if (dl_task(task)) { 2513 cs->nr_migrate_dl_tasks++; 2514 cs->sum_migrate_dl_bw += task->dl.dl_bw; 2515 } 2516 } 2517 2518 if (!cs->nr_migrate_dl_tasks) 2519 goto out_success; 2520 2521 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { 2522 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); 2523 2524 if (unlikely(cpu >= nr_cpu_ids)) { 2525 reset_migrate_dl_data(cs); 2526 ret = -EINVAL; 2527 goto out_unlock; 2528 } 2529 2530 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); 2531 if (ret) { 2532 reset_migrate_dl_data(cs); 2533 goto out_unlock; 2534 } 2535 } 2536 2537 out_success: 2538 /* 2539 * Mark attach is in progress. This makes validate_change() fail 2540 * changes which zero cpus/mems_allowed. 2541 */ 2542 cs->attach_in_progress++; 2543 out_unlock: 2544 mutex_unlock(&cpuset_mutex); 2545 return ret; 2546 } 2547 2548 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 2549 { 2550 struct cgroup_subsys_state *css; 2551 struct cpuset *cs; 2552 2553 cgroup_taskset_first(tset, &css); 2554 cs = css_cs(css); 2555 2556 mutex_lock(&cpuset_mutex); 2557 cs->attach_in_progress--; 2558 if (!cs->attach_in_progress) 2559 wake_up(&cpuset_attach_wq); 2560 2561 if (cs->nr_migrate_dl_tasks) { 2562 int cpu = cpumask_any(cs->effective_cpus); 2563 2564 dl_bw_free(cpu, cs->sum_migrate_dl_bw); 2565 reset_migrate_dl_data(cs); 2566 } 2567 2568 mutex_unlock(&cpuset_mutex); 2569 } 2570 2571 /* 2572 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task() 2573 * but we can't allocate it dynamically there. Define it global and 2574 * allocate from cpuset_init(). 2575 */ 2576 static cpumask_var_t cpus_attach; 2577 static nodemask_t cpuset_attach_nodemask_to; 2578 2579 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) 2580 { 2581 lockdep_assert_held(&cpuset_mutex); 2582 2583 if (cs != &top_cpuset) 2584 guarantee_online_cpus(task, cpus_attach); 2585 else 2586 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), 2587 cs->subparts_cpus); 2588 /* 2589 * can_attach beforehand should guarantee that this doesn't 2590 * fail. 
TODO: have a better way to handle failure here
2591 */
2592 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2593
2594 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2595 cpuset_update_task_spread_flags(cs, task);
2596 }
2597
2598 static void cpuset_attach(struct cgroup_taskset *tset)
2599 {
2600 struct task_struct *task;
2601 struct task_struct *leader;
2602 struct cgroup_subsys_state *css;
2603 struct cpuset *cs;
2604 struct cpuset *oldcs = cpuset_attach_old_cs;
2605 bool cpus_updated, mems_updated;
2606
2607 cgroup_taskset_first(tset, &css);
2608 cs = css_cs(css);
2609
2610 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
2611 mutex_lock(&cpuset_mutex);
2612 cpus_updated = !cpumask_equal(cs->effective_cpus,
2613 oldcs->effective_cpus);
2614 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2615
2616 /*
2617 * In the default hierarchy, enabling cpuset in the child cgroups
2618 * will trigger a number of cpuset_attach() calls with no change
2619 * in effective cpus and mems. In that case, we can optimize out
2620 * by skipping the task iteration and update.
2621 */
2622 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2623 !cpus_updated && !mems_updated) {
2624 cpuset_attach_nodemask_to = cs->effective_mems;
2625 goto out;
2626 }
2627
2628 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2629
2630 cgroup_taskset_for_each(task, css, tset)
2631 cpuset_attach_task(cs, task);
2632
2633 /*
2634 * Change mm for all threadgroup leaders. This is expensive and may
2635 * sleep and should be moved outside migration path proper. Skip it
2636 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
2637 * not set.
2638 */
2639 cpuset_attach_nodemask_to = cs->effective_mems;
2640 if (!is_memory_migrate(cs) && !mems_updated)
2641 goto out;
2642
2643 cgroup_taskset_for_each_leader(leader, css, tset) {
2644 struct mm_struct *mm = get_task_mm(leader);
2645
2646 if (mm) {
2647 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2648
2649 /*
2650 * old_mems_allowed is the same as mems_allowed
2651 * here, except if this task is being moved
2652 * automatically due to hotplug. In that case
2653 * @mems_allowed has been updated and is empty, so
2654 * @old_mems_allowed is the right nodemask that we
2655 * migrate the mm from.
2656 */ 2657 if (is_memory_migrate(cs)) 2658 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 2659 &cpuset_attach_nodemask_to); 2660 else 2661 mmput(mm); 2662 } 2663 } 2664 2665 out: 2666 cs->old_mems_allowed = cpuset_attach_nodemask_to; 2667 2668 if (cs->nr_migrate_dl_tasks) { 2669 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; 2670 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; 2671 reset_migrate_dl_data(cs); 2672 } 2673 2674 cs->attach_in_progress--; 2675 if (!cs->attach_in_progress) 2676 wake_up(&cpuset_attach_wq); 2677 2678 mutex_unlock(&cpuset_mutex); 2679 } 2680 2681 /* The various types of files and directories in a cpuset file system */ 2682 2683 typedef enum { 2684 FILE_MEMORY_MIGRATE, 2685 FILE_CPULIST, 2686 FILE_MEMLIST, 2687 FILE_EFFECTIVE_CPULIST, 2688 FILE_EFFECTIVE_MEMLIST, 2689 FILE_SUBPARTS_CPULIST, 2690 FILE_CPU_EXCLUSIVE, 2691 FILE_MEM_EXCLUSIVE, 2692 FILE_MEM_HARDWALL, 2693 FILE_SCHED_LOAD_BALANCE, 2694 FILE_PARTITION_ROOT, 2695 FILE_SCHED_RELAX_DOMAIN_LEVEL, 2696 FILE_MEMORY_PRESSURE_ENABLED, 2697 FILE_MEMORY_PRESSURE, 2698 FILE_SPREAD_PAGE, 2699 FILE_SPREAD_SLAB, 2700 } cpuset_filetype_t; 2701 2702 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 2703 u64 val) 2704 { 2705 struct cpuset *cs = css_cs(css); 2706 cpuset_filetype_t type = cft->private; 2707 int retval = 0; 2708 2709 cpus_read_lock(); 2710 mutex_lock(&cpuset_mutex); 2711 if (!is_cpuset_online(cs)) { 2712 retval = -ENODEV; 2713 goto out_unlock; 2714 } 2715 2716 switch (type) { 2717 case FILE_CPU_EXCLUSIVE: 2718 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 2719 break; 2720 case FILE_MEM_EXCLUSIVE: 2721 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 2722 break; 2723 case FILE_MEM_HARDWALL: 2724 retval = update_flag(CS_MEM_HARDWALL, cs, val); 2725 break; 2726 case FILE_SCHED_LOAD_BALANCE: 2727 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 2728 break; 2729 case FILE_MEMORY_MIGRATE: 2730 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 2731 break; 2732 case FILE_MEMORY_PRESSURE_ENABLED: 2733 cpuset_memory_pressure_enabled = !!val; 2734 break; 2735 case FILE_SPREAD_PAGE: 2736 retval = update_flag(CS_SPREAD_PAGE, cs, val); 2737 break; 2738 case FILE_SPREAD_SLAB: 2739 retval = update_flag(CS_SPREAD_SLAB, cs, val); 2740 break; 2741 default: 2742 retval = -EINVAL; 2743 break; 2744 } 2745 out_unlock: 2746 mutex_unlock(&cpuset_mutex); 2747 cpus_read_unlock(); 2748 return retval; 2749 } 2750 2751 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 2752 s64 val) 2753 { 2754 struct cpuset *cs = css_cs(css); 2755 cpuset_filetype_t type = cft->private; 2756 int retval = -ENODEV; 2757 2758 cpus_read_lock(); 2759 mutex_lock(&cpuset_mutex); 2760 if (!is_cpuset_online(cs)) 2761 goto out_unlock; 2762 2763 switch (type) { 2764 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2765 retval = update_relax_domain_level(cs, val); 2766 break; 2767 default: 2768 retval = -EINVAL; 2769 break; 2770 } 2771 out_unlock: 2772 mutex_unlock(&cpuset_mutex); 2773 cpus_read_unlock(); 2774 return retval; 2775 } 2776 2777 /* 2778 * Common handling for a write to a "cpus" or "mems" file. 
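 *
 * E.g. "echo 0-3 > cpuset.cpus" arrives here with buf == "0-3" and
 * of_cft(of)->private == FILE_CPULIST, which is routed to
 * update_cpumask() by the switch below.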
2779 */ 2780 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 2781 char *buf, size_t nbytes, loff_t off) 2782 { 2783 struct cpuset *cs = css_cs(of_css(of)); 2784 struct cpuset *trialcs; 2785 int retval = -ENODEV; 2786 2787 buf = strstrip(buf); 2788 2789 /* 2790 * CPU or memory hotunplug may leave @cs w/o any execution 2791 * resources, in which case the hotplug code asynchronously updates 2792 * configuration and transfers all tasks to the nearest ancestor 2793 * which can execute. 2794 * 2795 * As writes to "cpus" or "mems" may restore @cs's execution 2796 * resources, wait for the previously scheduled operations before 2797 * proceeding, so that we don't end up keep removing tasks added 2798 * after execution capability is restored. 2799 * 2800 * cpuset_hotplug_work calls back into cgroup core via 2801 * cgroup_transfer_tasks() and waiting for it from a cgroupfs 2802 * operation like this one can lead to a deadlock through kernfs 2803 * active_ref protection. Let's break the protection. Losing the 2804 * protection is okay as we check whether @cs is online after 2805 * grabbing cpuset_mutex anyway. This only happens on the legacy 2806 * hierarchies. 2807 */ 2808 css_get(&cs->css); 2809 kernfs_break_active_protection(of->kn); 2810 flush_work(&cpuset_hotplug_work); 2811 2812 cpus_read_lock(); 2813 mutex_lock(&cpuset_mutex); 2814 if (!is_cpuset_online(cs)) 2815 goto out_unlock; 2816 2817 trialcs = alloc_trial_cpuset(cs); 2818 if (!trialcs) { 2819 retval = -ENOMEM; 2820 goto out_unlock; 2821 } 2822 2823 switch (of_cft(of)->private) { 2824 case FILE_CPULIST: 2825 retval = update_cpumask(cs, trialcs, buf); 2826 break; 2827 case FILE_MEMLIST: 2828 retval = update_nodemask(cs, trialcs, buf); 2829 break; 2830 default: 2831 retval = -EINVAL; 2832 break; 2833 } 2834 2835 free_cpuset(trialcs); 2836 out_unlock: 2837 mutex_unlock(&cpuset_mutex); 2838 cpus_read_unlock(); 2839 kernfs_unbreak_active_protection(of->kn); 2840 css_put(&cs->css); 2841 flush_workqueue(cpuset_migrate_mm_wq); 2842 return retval ?: nbytes; 2843 } 2844 2845 /* 2846 * These ascii lists should be read in a single call, by using a user 2847 * buffer large enough to hold the entire map. If read in smaller 2848 * chunks, there is no guarantee of atomicity. Since the display format 2849 * used, list of ranges of sequential numbers, is variable length, 2850 * and since these maps can change value dynamically, one could read 2851 * gibberish by doing partial reads while a list was changing. 
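 *
 * A single read(2) into a buffer sized for the worst case is atomic
 * here, as the whole list is formatted in one cpuset_common_seq_show()
 * call below while holding callback_lock.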
2852 */ 2853 static int cpuset_common_seq_show(struct seq_file *sf, void *v) 2854 { 2855 struct cpuset *cs = css_cs(seq_css(sf)); 2856 cpuset_filetype_t type = seq_cft(sf)->private; 2857 int ret = 0; 2858 2859 spin_lock_irq(&callback_lock); 2860 2861 switch (type) { 2862 case FILE_CPULIST: 2863 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); 2864 break; 2865 case FILE_MEMLIST: 2866 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); 2867 break; 2868 case FILE_EFFECTIVE_CPULIST: 2869 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); 2870 break; 2871 case FILE_EFFECTIVE_MEMLIST: 2872 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); 2873 break; 2874 case FILE_SUBPARTS_CPULIST: 2875 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); 2876 break; 2877 default: 2878 ret = -EINVAL; 2879 } 2880 2881 spin_unlock_irq(&callback_lock); 2882 return ret; 2883 } 2884 2885 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) 2886 { 2887 struct cpuset *cs = css_cs(css); 2888 cpuset_filetype_t type = cft->private; 2889 switch (type) { 2890 case FILE_CPU_EXCLUSIVE: 2891 return is_cpu_exclusive(cs); 2892 case FILE_MEM_EXCLUSIVE: 2893 return is_mem_exclusive(cs); 2894 case FILE_MEM_HARDWALL: 2895 return is_mem_hardwall(cs); 2896 case FILE_SCHED_LOAD_BALANCE: 2897 return is_sched_load_balance(cs); 2898 case FILE_MEMORY_MIGRATE: 2899 return is_memory_migrate(cs); 2900 case FILE_MEMORY_PRESSURE_ENABLED: 2901 return cpuset_memory_pressure_enabled; 2902 case FILE_MEMORY_PRESSURE: 2903 return fmeter_getrate(&cs->fmeter); 2904 case FILE_SPREAD_PAGE: 2905 return is_spread_page(cs); 2906 case FILE_SPREAD_SLAB: 2907 return is_spread_slab(cs); 2908 default: 2909 BUG(); 2910 } 2911 2912 /* Unreachable but makes gcc happy */ 2913 return 0; 2914 } 2915 2916 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) 2917 { 2918 struct cpuset *cs = css_cs(css); 2919 cpuset_filetype_t type = cft->private; 2920 switch (type) { 2921 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2922 return cs->relax_domain_level; 2923 default: 2924 BUG(); 2925 } 2926 2927 /* Unreachable but makes gcc happy */ 2928 return 0; 2929 } 2930 2931 static int sched_partition_show(struct seq_file *seq, void *v) 2932 { 2933 struct cpuset *cs = css_cs(seq_css(seq)); 2934 const char *err, *type = NULL; 2935 2936 switch (cs->partition_root_state) { 2937 case PRS_ROOT: 2938 seq_puts(seq, "root\n"); 2939 break; 2940 case PRS_ISOLATED: 2941 seq_puts(seq, "isolated\n"); 2942 break; 2943 case PRS_MEMBER: 2944 seq_puts(seq, "member\n"); 2945 break; 2946 case PRS_INVALID_ROOT: 2947 type = "root"; 2948 fallthrough; 2949 case PRS_INVALID_ISOLATED: 2950 if (!type) 2951 type = "isolated"; 2952 err = perr_strings[READ_ONCE(cs->prs_err)]; 2953 if (err) 2954 seq_printf(seq, "%s invalid (%s)\n", type, err); 2955 else 2956 seq_printf(seq, "%s invalid\n", type); 2957 break; 2958 } 2959 return 0; 2960 } 2961 2962 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, 2963 size_t nbytes, loff_t off) 2964 { 2965 struct cpuset *cs = css_cs(of_css(of)); 2966 int val; 2967 int retval = -ENODEV; 2968 2969 buf = strstrip(buf); 2970 2971 /* 2972 * Convert "root" to ENABLED, and convert "member" to DISABLED. 
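 * (In the code below these map to PRS_ROOT and PRS_MEMBER, and
 * "isolated" likewise maps to PRS_ISOLATED.)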
2973 */ 2974 if (!strcmp(buf, "root")) 2975 val = PRS_ROOT; 2976 else if (!strcmp(buf, "member")) 2977 val = PRS_MEMBER; 2978 else if (!strcmp(buf, "isolated")) 2979 val = PRS_ISOLATED; 2980 else 2981 return -EINVAL; 2982 2983 css_get(&cs->css); 2984 cpus_read_lock(); 2985 mutex_lock(&cpuset_mutex); 2986 if (!is_cpuset_online(cs)) 2987 goto out_unlock; 2988 2989 retval = update_prstate(cs, val); 2990 out_unlock: 2991 mutex_unlock(&cpuset_mutex); 2992 cpus_read_unlock(); 2993 css_put(&cs->css); 2994 return retval ?: nbytes; 2995 } 2996 2997 /* 2998 * for the common functions, 'private' gives the type of file 2999 */ 3000 3001 static struct cftype legacy_files[] = { 3002 { 3003 .name = "cpus", 3004 .seq_show = cpuset_common_seq_show, 3005 .write = cpuset_write_resmask, 3006 .max_write_len = (100U + 6 * NR_CPUS), 3007 .private = FILE_CPULIST, 3008 }, 3009 3010 { 3011 .name = "mems", 3012 .seq_show = cpuset_common_seq_show, 3013 .write = cpuset_write_resmask, 3014 .max_write_len = (100U + 6 * MAX_NUMNODES), 3015 .private = FILE_MEMLIST, 3016 }, 3017 3018 { 3019 .name = "effective_cpus", 3020 .seq_show = cpuset_common_seq_show, 3021 .private = FILE_EFFECTIVE_CPULIST, 3022 }, 3023 3024 { 3025 .name = "effective_mems", 3026 .seq_show = cpuset_common_seq_show, 3027 .private = FILE_EFFECTIVE_MEMLIST, 3028 }, 3029 3030 { 3031 .name = "cpu_exclusive", 3032 .read_u64 = cpuset_read_u64, 3033 .write_u64 = cpuset_write_u64, 3034 .private = FILE_CPU_EXCLUSIVE, 3035 }, 3036 3037 { 3038 .name = "mem_exclusive", 3039 .read_u64 = cpuset_read_u64, 3040 .write_u64 = cpuset_write_u64, 3041 .private = FILE_MEM_EXCLUSIVE, 3042 }, 3043 3044 { 3045 .name = "mem_hardwall", 3046 .read_u64 = cpuset_read_u64, 3047 .write_u64 = cpuset_write_u64, 3048 .private = FILE_MEM_HARDWALL, 3049 }, 3050 3051 { 3052 .name = "sched_load_balance", 3053 .read_u64 = cpuset_read_u64, 3054 .write_u64 = cpuset_write_u64, 3055 .private = FILE_SCHED_LOAD_BALANCE, 3056 }, 3057 3058 { 3059 .name = "sched_relax_domain_level", 3060 .read_s64 = cpuset_read_s64, 3061 .write_s64 = cpuset_write_s64, 3062 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 3063 }, 3064 3065 { 3066 .name = "memory_migrate", 3067 .read_u64 = cpuset_read_u64, 3068 .write_u64 = cpuset_write_u64, 3069 .private = FILE_MEMORY_MIGRATE, 3070 }, 3071 3072 { 3073 .name = "memory_pressure", 3074 .read_u64 = cpuset_read_u64, 3075 .private = FILE_MEMORY_PRESSURE, 3076 }, 3077 3078 { 3079 .name = "memory_spread_page", 3080 .read_u64 = cpuset_read_u64, 3081 .write_u64 = cpuset_write_u64, 3082 .private = FILE_SPREAD_PAGE, 3083 }, 3084 3085 { 3086 .name = "memory_spread_slab", 3087 .read_u64 = cpuset_read_u64, 3088 .write_u64 = cpuset_write_u64, 3089 .private = FILE_SPREAD_SLAB, 3090 }, 3091 3092 { 3093 .name = "memory_pressure_enabled", 3094 .flags = CFTYPE_ONLY_ON_ROOT, 3095 .read_u64 = cpuset_read_u64, 3096 .write_u64 = cpuset_write_u64, 3097 .private = FILE_MEMORY_PRESSURE_ENABLED, 3098 }, 3099 3100 { } /* terminate */ 3101 }; 3102 3103 /* 3104 * This is currently a minimal set for the default hierarchy. It can be 3105 * expanded later on by migrating more features and control files from v1. 
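 *
 * On the default hierarchy these files appear with the subsystem
 * prefix, e.g. cpuset.cpus, cpuset.cpus.effective, cpuset.mems,
 * cpuset.mems.effective and cpuset.cpus.partition.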
3106 */ 3107 static struct cftype dfl_files[] = { 3108 { 3109 .name = "cpus", 3110 .seq_show = cpuset_common_seq_show, 3111 .write = cpuset_write_resmask, 3112 .max_write_len = (100U + 6 * NR_CPUS), 3113 .private = FILE_CPULIST, 3114 .flags = CFTYPE_NOT_ON_ROOT, 3115 }, 3116 3117 { 3118 .name = "mems", 3119 .seq_show = cpuset_common_seq_show, 3120 .write = cpuset_write_resmask, 3121 .max_write_len = (100U + 6 * MAX_NUMNODES), 3122 .private = FILE_MEMLIST, 3123 .flags = CFTYPE_NOT_ON_ROOT, 3124 }, 3125 3126 { 3127 .name = "cpus.effective", 3128 .seq_show = cpuset_common_seq_show, 3129 .private = FILE_EFFECTIVE_CPULIST, 3130 }, 3131 3132 { 3133 .name = "mems.effective", 3134 .seq_show = cpuset_common_seq_show, 3135 .private = FILE_EFFECTIVE_MEMLIST, 3136 }, 3137 3138 { 3139 .name = "cpus.partition", 3140 .seq_show = sched_partition_show, 3141 .write = sched_partition_write, 3142 .private = FILE_PARTITION_ROOT, 3143 .flags = CFTYPE_NOT_ON_ROOT, 3144 .file_offset = offsetof(struct cpuset, partition_file), 3145 }, 3146 3147 { 3148 .name = "cpus.subpartitions", 3149 .seq_show = cpuset_common_seq_show, 3150 .private = FILE_SUBPARTS_CPULIST, 3151 .flags = CFTYPE_DEBUG, 3152 }, 3153 3154 { } /* terminate */ 3155 }; 3156 3157 3158 /** 3159 * cpuset_css_alloc - Allocate a cpuset css 3160 * @parent_css: Parent css of the control group that the new cpuset will be 3161 * part of 3162 * Return: cpuset css on success, -ENOMEM on failure. 3163 * 3164 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return 3165 * top cpuset css otherwise. 3166 */ 3167 static struct cgroup_subsys_state * 3168 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 3169 { 3170 struct cpuset *cs; 3171 3172 if (!parent_css) 3173 return &top_cpuset.css; 3174 3175 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 3176 if (!cs) 3177 return ERR_PTR(-ENOMEM); 3178 3179 if (alloc_cpumasks(cs, NULL)) { 3180 kfree(cs); 3181 return ERR_PTR(-ENOMEM); 3182 } 3183 3184 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 3185 nodes_clear(cs->mems_allowed); 3186 nodes_clear(cs->effective_mems); 3187 fmeter_init(&cs->fmeter); 3188 cs->relax_domain_level = -1; 3189 3190 /* Set CS_MEMORY_MIGRATE for default hierarchy */ 3191 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) 3192 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); 3193 3194 return &cs->css; 3195 } 3196 3197 static int cpuset_css_online(struct cgroup_subsys_state *css) 3198 { 3199 struct cpuset *cs = css_cs(css); 3200 struct cpuset *parent = parent_cs(cs); 3201 struct cpuset *tmp_cs; 3202 struct cgroup_subsys_state *pos_css; 3203 3204 if (!parent) 3205 return 0; 3206 3207 cpus_read_lock(); 3208 mutex_lock(&cpuset_mutex); 3209 3210 set_bit(CS_ONLINE, &cs->flags); 3211 if (is_spread_page(parent)) 3212 set_bit(CS_SPREAD_PAGE, &cs->flags); 3213 if (is_spread_slab(parent)) 3214 set_bit(CS_SPREAD_SLAB, &cs->flags); 3215 3216 cpuset_inc(); 3217 3218 spin_lock_irq(&callback_lock); 3219 if (is_in_v2_mode()) { 3220 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 3221 cs->effective_mems = parent->effective_mems; 3222 cs->use_parent_ecpus = true; 3223 parent->child_ecpus_count++; 3224 } 3225 spin_unlock_irq(&callback_lock); 3226 3227 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 3228 goto out_unlock; 3229 3230 /* 3231 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 3232 * set. This flag handling is implemented in cgroup core for 3233 * historical reasons - the flag may be specified during mount. 
3234 * 3235 * Currently, if any sibling cpusets have exclusive cpus or mem, we 3236 * refuse to clone the configuration - thereby refusing the task to 3237 * be entered, and as a result refusing the sys_unshare() or 3238 * clone() which initiated it. If this becomes a problem for some 3239 * users who wish to allow that scenario, then this could be 3240 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 3241 * (and likewise for mems) to the new cgroup. 3242 */ 3243 rcu_read_lock(); 3244 cpuset_for_each_child(tmp_cs, pos_css, parent) { 3245 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 3246 rcu_read_unlock(); 3247 goto out_unlock; 3248 } 3249 } 3250 rcu_read_unlock(); 3251 3252 spin_lock_irq(&callback_lock); 3253 cs->mems_allowed = parent->mems_allowed; 3254 cs->effective_mems = parent->mems_allowed; 3255 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 3256 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 3257 spin_unlock_irq(&callback_lock); 3258 out_unlock: 3259 mutex_unlock(&cpuset_mutex); 3260 cpus_read_unlock(); 3261 return 0; 3262 } 3263 3264 /* 3265 * If the cpuset being removed has its flag 'sched_load_balance' 3266 * enabled, then simulate turning sched_load_balance off, which 3267 * will call rebuild_sched_domains_locked(). That is not needed 3268 * in the default hierarchy where only changes in partition 3269 * will cause repartitioning. 3270 * 3271 * If the cpuset has the 'sched.partition' flag enabled, simulate 3272 * turning 'sched.partition" off. 3273 */ 3274 3275 static void cpuset_css_offline(struct cgroup_subsys_state *css) 3276 { 3277 struct cpuset *cs = css_cs(css); 3278 3279 cpus_read_lock(); 3280 mutex_lock(&cpuset_mutex); 3281 3282 if (is_partition_valid(cs)) 3283 update_prstate(cs, 0); 3284 3285 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 3286 is_sched_load_balance(cs)) 3287 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 3288 3289 if (cs->use_parent_ecpus) { 3290 struct cpuset *parent = parent_cs(cs); 3291 3292 cs->use_parent_ecpus = false; 3293 parent->child_ecpus_count--; 3294 } 3295 3296 cpuset_dec(); 3297 clear_bit(CS_ONLINE, &cs->flags); 3298 3299 mutex_unlock(&cpuset_mutex); 3300 cpus_read_unlock(); 3301 } 3302 3303 static void cpuset_css_free(struct cgroup_subsys_state *css) 3304 { 3305 struct cpuset *cs = css_cs(css); 3306 3307 free_cpuset(cs); 3308 } 3309 3310 static void cpuset_bind(struct cgroup_subsys_state *root_css) 3311 { 3312 mutex_lock(&cpuset_mutex); 3313 spin_lock_irq(&callback_lock); 3314 3315 if (is_in_v2_mode()) { 3316 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 3317 top_cpuset.mems_allowed = node_possible_map; 3318 } else { 3319 cpumask_copy(top_cpuset.cpus_allowed, 3320 top_cpuset.effective_cpus); 3321 top_cpuset.mems_allowed = top_cpuset.effective_mems; 3322 } 3323 3324 spin_unlock_irq(&callback_lock); 3325 mutex_unlock(&cpuset_mutex); 3326 } 3327 3328 /* 3329 * In case the child is cloned into a cpuset different from its parent, 3330 * additional checks are done to see if the move is allowed. 
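 *
 * This is the CLONE_INTO_CGROUP case of clone3(), where the child is
 * created directly in a target cgroup, so the checks below mirror
 * cpuset_can_attach().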
3331 */ 3332 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) 3333 { 3334 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 3335 bool same_cs; 3336 int ret; 3337 3338 rcu_read_lock(); 3339 same_cs = (cs == task_cs(current)); 3340 rcu_read_unlock(); 3341 3342 if (same_cs) 3343 return 0; 3344 3345 lockdep_assert_held(&cgroup_mutex); 3346 mutex_lock(&cpuset_mutex); 3347 3348 /* Check to see if task is allowed in the cpuset */ 3349 ret = cpuset_can_attach_check(cs); 3350 if (ret) 3351 goto out_unlock; 3352 3353 ret = task_can_attach(task); 3354 if (ret) 3355 goto out_unlock; 3356 3357 ret = security_task_setscheduler(task); 3358 if (ret) 3359 goto out_unlock; 3360 3361 /* 3362 * Mark attach is in progress. This makes validate_change() fail 3363 * changes which zero cpus/mems_allowed. 3364 */ 3365 cs->attach_in_progress++; 3366 out_unlock: 3367 mutex_unlock(&cpuset_mutex); 3368 return ret; 3369 } 3370 3371 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset) 3372 { 3373 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); 3374 bool same_cs; 3375 3376 rcu_read_lock(); 3377 same_cs = (cs == task_cs(current)); 3378 rcu_read_unlock(); 3379 3380 if (same_cs) 3381 return; 3382 3383 mutex_lock(&cpuset_mutex); 3384 cs->attach_in_progress--; 3385 if (!cs->attach_in_progress) 3386 wake_up(&cpuset_attach_wq); 3387 mutex_unlock(&cpuset_mutex); 3388 } 3389 3390 /* 3391 * Make sure the new task conform to the current state of its parent, 3392 * which could have been changed by cpuset just after it inherits the 3393 * state from the parent and before it sits on the cgroup's task list. 3394 */ 3395 static void cpuset_fork(struct task_struct *task) 3396 { 3397 struct cpuset *cs; 3398 bool same_cs; 3399 3400 rcu_read_lock(); 3401 cs = task_cs(task); 3402 same_cs = (cs == task_cs(current)); 3403 rcu_read_unlock(); 3404 3405 if (same_cs) { 3406 if (cs == &top_cpuset) 3407 return; 3408 3409 set_cpus_allowed_ptr(task, current->cpus_ptr); 3410 task->mems_allowed = current->mems_allowed; 3411 return; 3412 } 3413 3414 /* CLONE_INTO_CGROUP */ 3415 mutex_lock(&cpuset_mutex); 3416 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 3417 cpuset_attach_task(cs, task); 3418 3419 cs->attach_in_progress--; 3420 if (!cs->attach_in_progress) 3421 wake_up(&cpuset_attach_wq); 3422 3423 mutex_unlock(&cpuset_mutex); 3424 } 3425 3426 struct cgroup_subsys cpuset_cgrp_subsys = { 3427 .css_alloc = cpuset_css_alloc, 3428 .css_online = cpuset_css_online, 3429 .css_offline = cpuset_css_offline, 3430 .css_free = cpuset_css_free, 3431 .can_attach = cpuset_can_attach, 3432 .cancel_attach = cpuset_cancel_attach, 3433 .attach = cpuset_attach, 3434 .post_attach = cpuset_post_attach, 3435 .bind = cpuset_bind, 3436 .can_fork = cpuset_can_fork, 3437 .cancel_fork = cpuset_cancel_fork, 3438 .fork = cpuset_fork, 3439 .legacy_cftypes = legacy_files, 3440 .dfl_cftypes = dfl_files, 3441 .early_init = true, 3442 .threaded = true, 3443 }; 3444 3445 /** 3446 * cpuset_init - initialize cpusets at system boot 3447 * 3448 * Description: Initialize top_cpuset 3449 **/ 3450 3451 int __init cpuset_init(void) 3452 { 3453 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 3454 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 3455 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); 3456 3457 cpumask_setall(top_cpuset.cpus_allowed); 3458 nodes_setall(top_cpuset.mems_allowed); 3459 cpumask_setall(top_cpuset.effective_cpus); 3460 
nodes_setall(top_cpuset.effective_mems);
3461
3462 fmeter_init(&top_cpuset.fmeter);
3463 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3464 top_cpuset.relax_domain_level = -1;
3465
3466 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3467
3468 return 0;
3469 }
3470
3471 /*
3472 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3473 * or memory nodes, we need to walk over the cpuset hierarchy,
3474 * removing that CPU or node from all cpusets. If this removes the
3475 * last CPU or node from a cpuset, then move the tasks in the empty
3476 * cpuset to its next-highest non-empty parent.
3477 */
3478 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3479 {
3480 struct cpuset *parent;
3481
3482 /*
3483 * Find its next-highest non-empty parent (the top cpuset
3484 * has online cpus, so it can't be empty).
3485 */
3486 parent = parent_cs(cs);
3487 while (cpumask_empty(parent->cpus_allowed) ||
3488 nodes_empty(parent->mems_allowed))
3489 parent = parent_cs(parent);
3490
3491 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3492 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3493 pr_cont_cgroup_name(cs->css.cgroup);
3494 pr_cont("\n");
3495 }
3496 }
3497
3498 static void
3499 hotplug_update_tasks_legacy(struct cpuset *cs,
3500 struct cpumask *new_cpus, nodemask_t *new_mems,
3501 bool cpus_updated, bool mems_updated)
3502 {
3503 bool is_empty;
3504
3505 spin_lock_irq(&callback_lock);
3506 cpumask_copy(cs->cpus_allowed, new_cpus);
3507 cpumask_copy(cs->effective_cpus, new_cpus);
3508 cs->mems_allowed = *new_mems;
3509 cs->effective_mems = *new_mems;
3510 spin_unlock_irq(&callback_lock);
3511
3512 /*
3513 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3514 * as the tasks will be migrated to an ancestor.
3515 */
3516 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3517 update_tasks_cpumask(cs, new_cpus);
3518 if (mems_updated && !nodes_empty(cs->mems_allowed))
3519 update_tasks_nodemask(cs);
3520
3521 is_empty = cpumask_empty(cs->cpus_allowed) ||
3522 nodes_empty(cs->mems_allowed);
3523
3524 mutex_unlock(&cpuset_mutex);
3525
3526 /*
3527 * Move tasks to the nearest ancestor with execution resources.
3528 * This is a full cgroup operation which will also call back into
3529 * cpuset. Should be done outside any lock.
3530 */ 3531 if (is_empty) 3532 remove_tasks_in_empty_cpuset(cs); 3533 3534 mutex_lock(&cpuset_mutex); 3535 } 3536 3537 static void 3538 hotplug_update_tasks(struct cpuset *cs, 3539 struct cpumask *new_cpus, nodemask_t *new_mems, 3540 bool cpus_updated, bool mems_updated) 3541 { 3542 /* A partition root is allowed to have empty effective cpus */ 3543 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) 3544 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 3545 if (nodes_empty(*new_mems)) 3546 *new_mems = parent_cs(cs)->effective_mems; 3547 3548 spin_lock_irq(&callback_lock); 3549 cpumask_copy(cs->effective_cpus, new_cpus); 3550 cs->effective_mems = *new_mems; 3551 spin_unlock_irq(&callback_lock); 3552 3553 if (cpus_updated) 3554 update_tasks_cpumask(cs, new_cpus); 3555 if (mems_updated) 3556 update_tasks_nodemask(cs); 3557 } 3558 3559 static bool force_rebuild; 3560 3561 void cpuset_force_rebuild(void) 3562 { 3563 force_rebuild = true; 3564 } 3565 3566 /** 3567 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 3568 * @cs: cpuset in interest 3569 * @tmp: the tmpmasks structure pointer 3570 * 3571 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 3572 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 3573 * all its tasks are moved to the nearest ancestor with both resources. 3574 */ 3575 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 3576 { 3577 static cpumask_t new_cpus; 3578 static nodemask_t new_mems; 3579 bool cpus_updated; 3580 bool mems_updated; 3581 struct cpuset *parent; 3582 retry: 3583 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 3584 3585 mutex_lock(&cpuset_mutex); 3586 3587 /* 3588 * We have raced with task attaching. We wait until attaching 3589 * is finished, so we won't attach a task to an empty cpuset. 3590 */ 3591 if (cs->attach_in_progress) { 3592 mutex_unlock(&cpuset_mutex); 3593 goto retry; 3594 } 3595 3596 parent = parent_cs(cs); 3597 compute_effective_cpumask(&new_cpus, cs, parent); 3598 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 3599 3600 if (cs->nr_subparts_cpus) 3601 /* 3602 * Make sure that CPUs allocated to child partitions 3603 * do not show up in effective_cpus. 3604 */ 3605 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); 3606 3607 if (!tmp || !cs->partition_root_state) 3608 goto update_tasks; 3609 3610 /* 3611 * In the unlikely event that a partition root has empty 3612 * effective_cpus with tasks, we will have to invalidate child 3613 * partitions, if present, by setting nr_subparts_cpus to 0 to 3614 * reclaim their cpus. 3615 */ 3616 if (cs->nr_subparts_cpus && is_partition_valid(cs) && 3617 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { 3618 spin_lock_irq(&callback_lock); 3619 cs->nr_subparts_cpus = 0; 3620 cpumask_clear(cs->subparts_cpus); 3621 spin_unlock_irq(&callback_lock); 3622 compute_effective_cpumask(&new_cpus, cs, parent); 3623 } 3624 3625 /* 3626 * Force the partition to become invalid if either one of 3627 * the following conditions hold: 3628 * 1) empty effective cpus but not valid empty partition. 3629 * 2) parent is invalid or doesn't grant any cpus to child 3630 * partitions. 
3631 */
3632 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3633 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3634 int old_prs, parent_prs;
3635
3636 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3637 if (cs->nr_subparts_cpus) {
3638 spin_lock_irq(&callback_lock);
3639 cs->nr_subparts_cpus = 0;
3640 cpumask_clear(cs->subparts_cpus);
3641 spin_unlock_irq(&callback_lock);
3642 compute_effective_cpumask(&new_cpus, cs, parent);
3643 }
3644
3645 old_prs = cs->partition_root_state;
3646 parent_prs = parent->partition_root_state;
3647 if (is_partition_valid(cs)) {
3648 spin_lock_irq(&callback_lock);
3649 make_partition_invalid(cs);
3650 spin_unlock_irq(&callback_lock);
3651 if (is_prs_invalid(parent_prs))
3652 WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3653 else if (!parent_prs)
3654 WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3655 else
3656 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3657 notify_partition_change(cs, old_prs);
3658 }
3659 cpuset_force_rebuild();
3660 }
3661
3662 /*
3663 * On the other hand, an invalid partition root may be transitioned
3664 * back to a regular one.
3665 */
3666 else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3667 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3668 if (is_partition_valid(cs))
3669 cpuset_force_rebuild();
3670 }
3671
3672 update_tasks:
3673 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3674 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3675 if (!cpus_updated && !mems_updated)
3676 goto unlock; /* Hotplug doesn't affect this cpuset */
3677
3678 if (mems_updated)
3679 check_insane_mems_config(&new_mems);
3680
3681 if (is_in_v2_mode())
3682 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3683 cpus_updated, mems_updated);
3684 else
3685 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3686 cpus_updated, mems_updated);
3687
3688 unlock:
3689 mutex_unlock(&cpuset_mutex);
3690 }
3691
3692 /**
3693 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3694 *
3695 * This function is called after either CPU or memory configuration has
3696 * changed and updates cpusets accordingly. The top_cpuset is always
3697 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3698 * order to make cpusets transparent (of no effect) on systems that are
3699 * actively using CPU hotplug but making no active use of cpusets.
3700 *
3701 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3702 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3703 * all descendants.
3704 *
3705 * Note that CPU offlining during suspend is ignored. We don't modify
3706 * cpusets across suspend/resume cycles at all.
3707 */
3708 static void cpuset_hotplug_workfn(struct work_struct *work)
3709 {
3710 static cpumask_t new_cpus;
3711 static nodemask_t new_mems;
3712 bool cpus_updated, mems_updated;
3713 bool on_dfl = is_in_v2_mode();
3714 struct tmpmasks tmp, *ptmp = NULL;
3715
3716 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3717 ptmp = &tmp;
3718
3719 mutex_lock(&cpuset_mutex);
3720
3721 /* fetch the available cpus/mems and find out which changed how */
3722 cpumask_copy(&new_cpus, cpu_active_mask);
3723 new_mems = node_states[N_MEMORY];
3724
3725 /*
3726 * If subparts_cpus is populated, it is likely that the check below
3727 * will produce a false positive on cpus_updated when the cpu list
3728 * isn't changed. It is extra work, but it is better to be safe.
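 *
 * (top_cpuset.effective_cpus has subparts_cpus removed from it, so it
 * can differ from cpu_active_mask even when no CPU changed state.)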
3729 */
3730 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3731 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3732
3733 /*
3734 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3735 * we assume that cpus have been updated.
3736 */
3737 if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3738 cpus_updated = true;
3739
3740 /* synchronize cpus_allowed to cpu_active_mask */
3741 if (cpus_updated) {
3742 spin_lock_irq(&callback_lock);
3743 if (!on_dfl)
3744 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3745 /*
3746 * Make sure that CPUs allocated to child partitions
3747 * do not show up in effective_cpus. If no CPU is left,
3748 * we clear the subparts_cpus & let the child partitions
3749 * fight for the CPUs again.
3750 */
3751 if (top_cpuset.nr_subparts_cpus) {
3752 if (cpumask_subset(&new_cpus,
3753 top_cpuset.subparts_cpus)) {
3754 top_cpuset.nr_subparts_cpus = 0;
3755 cpumask_clear(top_cpuset.subparts_cpus);
3756 } else {
3757 cpumask_andnot(&new_cpus, &new_cpus,
3758 top_cpuset.subparts_cpus);
3759 }
3760 }
3761 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3762 spin_unlock_irq(&callback_lock);
3763 /* we don't mess with cpumasks of tasks in top_cpuset */
3764 }
3765
3766 /* synchronize mems_allowed to N_MEMORY */
3767 if (mems_updated) {
3768 spin_lock_irq(&callback_lock);
3769 if (!on_dfl)
3770 top_cpuset.mems_allowed = new_mems;
3771 top_cpuset.effective_mems = new_mems;
3772 spin_unlock_irq(&callback_lock);
3773 update_tasks_nodemask(&top_cpuset);
3774 }
3775
3776 mutex_unlock(&cpuset_mutex);
3777
3778 /* if cpus or mems changed, we need to propagate to descendants */
3779 if (cpus_updated || mems_updated) {
3780 struct cpuset *cs;
3781 struct cgroup_subsys_state *pos_css;
3782
3783 rcu_read_lock();
3784 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3785 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3786 continue;
3787 rcu_read_unlock();
3788
3789 cpuset_hotplug_update_tasks(cs, ptmp);
3790
3791 rcu_read_lock();
3792 css_put(&cs->css);
3793 }
3794 rcu_read_unlock();
3795 }
3796
3797 /* rebuild sched domains if cpus_allowed has changed */
3798 if (cpus_updated || force_rebuild) {
3799 force_rebuild = false;
3800 rebuild_sched_domains();
3801 }
3802
3803 free_cpumasks(NULL, ptmp);
3804 }
3805
3806 void cpuset_update_active_cpus(void)
3807 {
3808 /*
3809 * We're inside cpu hotplug critical region which usually nests
3810 * inside cgroup synchronization. Bounce actual hotplug processing
3811 * to a work item to avoid reverse locking order.
3812 */
3813 schedule_work(&cpuset_hotplug_work);
3814 }
3815
3816 void cpuset_wait_for_hotplug(void)
3817 {
3818 flush_work(&cpuset_hotplug_work);
3819 }
3820
3821 /*
3822 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3823 * Call this routine anytime after node_states[N_MEMORY] changes.
3824 * See cpuset_update_active_cpus() for CPU hotplug handling.
3825 */
3826 static int cpuset_track_online_nodes(struct notifier_block *self,
3827 unsigned long action, void *arg)
3828 {
3829 schedule_work(&cpuset_hotplug_work);
3830 return NOTIFY_OK;
3831 }
3832
3833 /**
3834 * cpuset_init_smp - initialize cpus_allowed
3835 *
3836 * Description: Finish top cpuset after cpu, node maps are initialized
3837 */
3838 void __init cpuset_init_smp(void)
3839 {
3840 /*
3841 * cpus_allowed/mems_allowed set to v2 values in the initial
3842 * cpuset_bind() call will be reset to v1 values in another
3843 * cpuset_bind() call when v1 cpuset is mounted.
/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after the cpu and node maps are initialized.
 */
void __init cpuset_init_smp(void)
{
	/*
	 * cpus_allowed/mems_allowed set to v2 values in the initial
	 * cpuset_bind() call will be reset to v1 values in another
	 * cpuset_bind() call when the v1 cpuset is mounted.
	 */
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
	top_cpuset.effective_mems = node_states[N_MEMORY];

	hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);

	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
	BUG_ON(!cpuset_migrate_mm_wq);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * task's cpuset, except when the task is in the top cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;
	struct cpuset *cs;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();

	cs = task_cs(tsk);
	if (cs != &top_cpuset)
		guarantee_online_cpus(tsk, pmask);
	/*
	 * Tasks in the top cpuset won't have their cpumasks updated
	 * when a hotplug online/offline event happens.  So we include all
	 * offline cpus in the allowed cpu list.
	 */
	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);

		/*
		 * We first exclude cpus allocated to partitions.  If there is
		 * no allowable online cpu left, we fall back to all possible
		 * cpus.
		 */
		cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
		if (!cpumask_intersects(pmask, cpu_online_mask))
			cpumask_copy(pmask, possible_mask);
	}

	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);
}
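/*
 * Illustrative sketch only (hypothetical caller): a typical user of
 * cpuset_cpus_allowed() supplies its own cpumask storage and can rely
 * on receiving a non-empty mask back.
 */
static int example_count_allowed_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;
	int ncpus;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(tsk, mask);	/* never returns an empty mask */
	ncpus = cpumask_weight(mask);

	free_cpumask_var(mask);
	return ncpus;
}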
/**
 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
 * @tsk: pointer to task_struct with which the scheduler is struggling
 *
 * Description: In the case that the scheduler cannot find an allowed cpu in
 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed.  In legacy
 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
 * which will not contain a sane cpumask during cases such as cpu hotplugging.
 * This is the absolute last resort for the scheduler and it is only used if
 * _every_ other avenue has been traveled.
 *
 * Returns true if the affinity of @tsk was changed, false otherwise.
 **/

bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
	const struct cpumask *cs_mask;
	bool changed = false;

	rcu_read_lock();
	cs_mask = task_cs(tsk)->cpus_allowed;
	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
		do_set_cpus_allowed(tsk, cs_mask);
		changed = true;
	}
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used task_cs() and its cpus_allowed locklessly and thus
	 * can race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed.  However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in task_cs()->cpus_allowed.  Otherwise we can temporarily
	 * set any mask even if it is not right from the task_cs() point of
	 * view; the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
	return changed;
}

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}
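/*
 * Illustrative sketch only (hypothetical helper): clamping a
 * user-supplied nodemask to the calling task's cpuset, combining
 * cpuset_mems_allowed() with the usual nodemask operations, in the way
 * a mempolicy-style caller might.
 */
static int example_clamp_nodemask_to_cpuset(nodemask_t *nodes)
{
	nodemask_t allowed = cpuset_mems_allowed(current);

	if (!nodes_intersects(*nodes, allowed))
		return -EINVAL;		/* nothing usable would remain */

	nodes_and(*nodes, *nodes, allowed);
	return 0;
}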
/*
 * cpuset_node_allowed - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If @node is set in
 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes.  If current has access to memory reserves as an oom victim, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	tsk_is_oom_victim - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 */
bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs;		/* current cpuset ancestors */
	bool allowed;			/* is allocation in zone z allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(tsk_is_oom_victim(current)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);
	rcu_read_unlock();

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}
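/*
 * Illustrative sketch only (hypothetical loop; the real callers live in
 * mm/): an allocator-side path could use cpuset_node_allowed() to find
 * the first node the current task may allocate from under @gfp_mask.
 */
static int example_first_allowed_node(gfp_t gfp_mask)
{
	int nid;

	for_each_online_node(nid) {
		if (cpuset_node_allowed(nid, gfp_mask))
			return nid;
	}
	return NUMA_NO_NODE;
}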
/**
 * cpuset_spread_node() - On which node to begin search for a page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as it is for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking (as it will for
 * certain page cache or slab cache pages, such as those used for
 * file system buffers and inode caches), then instead of starting
 * the search for a free page on the local node, spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */
static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 */
int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

/**
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 */
int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one of the tasks' memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_cont(",cpuset=");
	pr_cont_cgroup_name(cgrp);
	pr_cont(",mems_allowed=%*pbl",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/*
 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Displayed to user space in the per-cpuset read-only file
 * "memory_pressure".  The value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 */

void __cpuset_memory_pressure_bump(void)
{
	rcu_read_lock();
	fmeter_markevent(&task_cs(current)->fmeter);
	rcu_read_unlock();
}
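/*
 * Illustrative sketch only: a header-side wrapper along these lines (the
 * real one lives in include/linux/cpuset.h and may differ) checks the
 * enable flag first, so the reclaim path pays nothing when collection
 * is disabled.
 */
#define example_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)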
#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	css = task_get_css(tsk, cpuset_cgrp_id);
	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
				current->nsproxy->cgroup_ns);
	css_put(css);
	if (retval >= PATH_MAX)
		retval = -ENAMETOOLONG;
	if (retval < 0)
		goto out_free;
	seq_puts(m, buf);
	seq_putc(m, '\n');
	retval = 0;
out_free:
	kfree(buf);
out:
	return retval;
}
#endif /* CONFIG_PROC_PID_CPUSET */

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}
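/*
 * Illustrative sketch only (user-space, not part of this file, hence
 * compiled out): a minimal reader for the /proc/<pid>/cpuset interface
 * implemented above.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char path[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (f && fgets(path, sizeof(path), f))
		printf("cpuset path: %s", path);	/* e.g. "/" at the root */
	if (f)
		fclose(f);
	return 0;
}
#endif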