/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective masks.
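	 *
	 * A small worked example of the default-hierarchy rule above
	 * (hypothetical masks, for illustration only):
	 *
	 *	parent's effective_cpus:	0-3
	 *	child's cpuset.cpus:		2-5
	 *	child's effective_cpus:		2-3	(the intersection)
	 *
	 * If the intersection were empty (say cpuset.cpus = 6-7), the child
	 * would inherit the parent's effective mask, 0-3, instead.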
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus. In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t	addmask, delmask;	/* For partition root */
	cpumask_var_t	new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ?
container_of(css, struct cpuset, css) : NULL; 195 } 196 197 /* Retrieve the cpuset for a task */ 198 static inline struct cpuset *task_cs(struct task_struct *task) 199 { 200 return css_cs(task_css(task, cpuset_cgrp_id)); 201 } 202 203 static inline struct cpuset *parent_cs(struct cpuset *cs) 204 { 205 return css_cs(cs->css.parent); 206 } 207 208 /* bits in struct cpuset flags field */ 209 typedef enum { 210 CS_ONLINE, 211 CS_CPU_EXCLUSIVE, 212 CS_MEM_EXCLUSIVE, 213 CS_MEM_HARDWALL, 214 CS_MEMORY_MIGRATE, 215 CS_SCHED_LOAD_BALANCE, 216 CS_SPREAD_PAGE, 217 CS_SPREAD_SLAB, 218 } cpuset_flagbits_t; 219 220 /* convenient tests for these bits */ 221 static inline bool is_cpuset_online(struct cpuset *cs) 222 { 223 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); 224 } 225 226 static inline int is_cpu_exclusive(const struct cpuset *cs) 227 { 228 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); 229 } 230 231 static inline int is_mem_exclusive(const struct cpuset *cs) 232 { 233 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); 234 } 235 236 static inline int is_mem_hardwall(const struct cpuset *cs) 237 { 238 return test_bit(CS_MEM_HARDWALL, &cs->flags); 239 } 240 241 static inline int is_sched_load_balance(const struct cpuset *cs) 242 { 243 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 244 } 245 246 static inline int is_memory_migrate(const struct cpuset *cs) 247 { 248 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); 249 } 250 251 static inline int is_spread_page(const struct cpuset *cs) 252 { 253 return test_bit(CS_SPREAD_PAGE, &cs->flags); 254 } 255 256 static inline int is_spread_slab(const struct cpuset *cs) 257 { 258 return test_bit(CS_SPREAD_SLAB, &cs->flags); 259 } 260 261 static inline int is_partition_root(const struct cpuset *cs) 262 { 263 return cs->partition_root_state > 0; 264 } 265 266 static struct cpuset top_cpuset = { 267 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) | 268 (1 << CS_MEM_EXCLUSIVE)), 269 .partition_root_state = PRS_ENABLED, 270 }; 271 272 /** 273 * cpuset_for_each_child - traverse online children of a cpuset 274 * @child_cs: loop cursor pointing to the current child 275 * @pos_css: used for iteration 276 * @parent_cs: target cpuset to walk children of 277 * 278 * Walk @child_cs through the online children of @parent_cs. Must be used 279 * with RCU read locked. 280 */ 281 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \ 282 css_for_each_child((pos_css), &(parent_cs)->css) \ 283 if (is_cpuset_online(((child_cs) = css_cs((pos_css))))) 284 285 /** 286 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants 287 * @des_cs: loop cursor pointing to the current descendant 288 * @pos_css: used for iteration 289 * @root_cs: target cpuset to walk ancestor of 290 * 291 * Walk @des_cs through the online descendants of @root_cs. Must be used 292 * with RCU read locked. The caller may modify @pos_css by calling 293 * css_rightmost_descendant() to skip subtree. @root_cs is included in the 294 * iteration and the first node to be visited. 295 */ 296 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \ 297 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \ 298 if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) 299 300 /* 301 * There are two global locks guarding cpuset structures - cpuset_mutex and 302 * callback_lock. We also require taking task_lock() when dereferencing a 303 * task's cpuset pointer. See "The task_lock() exception", at the end of this 304 * comment. 
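 *
 * In outline (an illustrative sketch, not a copy of any one function),
 * a typical writer does:
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the request, allocate memory ...
 *	spin_lock_irq(&callback_lock);
 *	... update cpus_allowed / mems_allowed / flags ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);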
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used when on default hierarchy or the
 * cgroup_v2_mode flag is set.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
396 * 397 * Call with callback_lock or cpuset_mutex held. 398 */ 399 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) 400 { 401 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) 402 cs = parent_cs(cs); 403 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); 404 } 405 406 /* 407 * update task's spread flag if cpuset's page/slab spread flag is set 408 * 409 * Call with callback_lock or cpuset_mutex held. 410 */ 411 static void cpuset_update_task_spread_flag(struct cpuset *cs, 412 struct task_struct *tsk) 413 { 414 if (is_spread_page(cs)) 415 task_set_spread_page(tsk); 416 else 417 task_clear_spread_page(tsk); 418 419 if (is_spread_slab(cs)) 420 task_set_spread_slab(tsk); 421 else 422 task_clear_spread_slab(tsk); 423 } 424 425 /* 426 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q? 427 * 428 * One cpuset is a subset of another if all its allowed CPUs and 429 * Memory Nodes are a subset of the other, and its exclusive flags 430 * are only set if the other's are set. Call holding cpuset_mutex. 431 */ 432 433 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) 434 { 435 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && 436 nodes_subset(p->mems_allowed, q->mems_allowed) && 437 is_cpu_exclusive(p) <= is_cpu_exclusive(q) && 438 is_mem_exclusive(p) <= is_mem_exclusive(q); 439 } 440 441 /** 442 * alloc_cpumasks - allocate three cpumasks for cpuset 443 * @cs: the cpuset that have cpumasks to be allocated. 444 * @tmp: the tmpmasks structure pointer 445 * Return: 0 if successful, -ENOMEM otherwise. 446 * 447 * Only one of the two input arguments should be non-NULL. 448 */ 449 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) 450 { 451 cpumask_var_t *pmask1, *pmask2, *pmask3; 452 453 if (cs) { 454 pmask1 = &cs->cpus_allowed; 455 pmask2 = &cs->effective_cpus; 456 pmask3 = &cs->subparts_cpus; 457 } else { 458 pmask1 = &tmp->new_cpus; 459 pmask2 = &tmp->addmask; 460 pmask3 = &tmp->delmask; 461 } 462 463 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) 464 return -ENOMEM; 465 466 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) 467 goto free_one; 468 469 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) 470 goto free_two; 471 472 return 0; 473 474 free_two: 475 free_cpumask_var(*pmask2); 476 free_one: 477 free_cpumask_var(*pmask1); 478 return -ENOMEM; 479 } 480 481 /** 482 * free_cpumasks - free cpumasks in a tmpmasks structure 483 * @cs: the cpuset that have cpumasks to be free. 
484 * @tmp: the tmpmasks structure pointer 485 */ 486 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) 487 { 488 if (cs) { 489 free_cpumask_var(cs->cpus_allowed); 490 free_cpumask_var(cs->effective_cpus); 491 free_cpumask_var(cs->subparts_cpus); 492 } 493 if (tmp) { 494 free_cpumask_var(tmp->new_cpus); 495 free_cpumask_var(tmp->addmask); 496 free_cpumask_var(tmp->delmask); 497 } 498 } 499 500 /** 501 * alloc_trial_cpuset - allocate a trial cpuset 502 * @cs: the cpuset that the trial cpuset duplicates 503 */ 504 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) 505 { 506 struct cpuset *trial; 507 508 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); 509 if (!trial) 510 return NULL; 511 512 if (alloc_cpumasks(trial, NULL)) { 513 kfree(trial); 514 return NULL; 515 } 516 517 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); 518 cpumask_copy(trial->effective_cpus, cs->effective_cpus); 519 return trial; 520 } 521 522 /** 523 * free_cpuset - free the cpuset 524 * @cs: the cpuset to be freed 525 */ 526 static inline void free_cpuset(struct cpuset *cs) 527 { 528 free_cpumasks(cs, NULL); 529 kfree(cs); 530 } 531 532 /* 533 * validate_change() - Used to validate that any proposed cpuset change 534 * follows the structural rules for cpusets. 535 * 536 * If we replaced the flag and mask values of the current cpuset 537 * (cur) with those values in the trial cpuset (trial), would 538 * our various subset and exclusive rules still be valid? Presumes 539 * cpuset_mutex held. 540 * 541 * 'cur' is the address of an actual, in-use cpuset. Operations 542 * such as list traversal that depend on the actual address of the 543 * cpuset in the list must use cur below, not trial. 544 * 545 * 'trial' is the address of bulk structure copy of cur, with 546 * perhaps one or more of the fields cpus_allowed, mems_allowed, 547 * or flags changed to new, trial values. 548 * 549 * Return 0 if valid, -errno if not. 550 */ 551 552 static int validate_change(struct cpuset *cur, struct cpuset *trial) 553 { 554 struct cgroup_subsys_state *css; 555 struct cpuset *c, *par; 556 int ret; 557 558 rcu_read_lock(); 559 560 /* Each of our child cpusets must be a subset of us */ 561 ret = -EBUSY; 562 cpuset_for_each_child(c, css, cur) 563 if (!is_cpuset_subset(c, trial)) 564 goto out; 565 566 /* Remaining checks don't apply to root cpuset */ 567 ret = 0; 568 if (cur == &top_cpuset) 569 goto out; 570 571 par = parent_cs(cur); 572 573 /* On legacy hiearchy, we must be a subset of our parent cpuset. */ 574 ret = -EACCES; 575 if (!is_in_v2_mode() && !is_cpuset_subset(trial, par)) 576 goto out; 577 578 /* 579 * If either I or some sibling (!= me) is exclusive, we can't 580 * overlap 581 */ 582 ret = -EINVAL; 583 cpuset_for_each_child(c, css, par) { 584 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && 585 c != cur && 586 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) 587 goto out; 588 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && 589 c != cur && 590 nodes_intersects(trial->mems_allowed, c->mems_allowed)) 591 goto out; 592 } 593 594 /* 595 * Cpusets with tasks - existing or newly being attached - can't 596 * be changed to have empty cpus_allowed or mems_allowed. 
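	 *
	 * For example, assuming a cpuset that currently has tasks and a
	 * non-empty cpus list, a write that would empty the list (such as
	 * "echo '' > cpuset.cpus") is rejected here with -ENOSPC.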
597 */ 598 ret = -ENOSPC; 599 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { 600 if (!cpumask_empty(cur->cpus_allowed) && 601 cpumask_empty(trial->cpus_allowed)) 602 goto out; 603 if (!nodes_empty(cur->mems_allowed) && 604 nodes_empty(trial->mems_allowed)) 605 goto out; 606 } 607 608 /* 609 * We can't shrink if we won't have enough room for SCHED_DEADLINE 610 * tasks. 611 */ 612 ret = -EBUSY; 613 if (is_cpu_exclusive(cur) && 614 !cpuset_cpumask_can_shrink(cur->cpus_allowed, 615 trial->cpus_allowed)) 616 goto out; 617 618 ret = 0; 619 out: 620 rcu_read_unlock(); 621 return ret; 622 } 623 624 #ifdef CONFIG_SMP 625 /* 626 * Helper routine for generate_sched_domains(). 627 * Do cpusets a, b have overlapping effective cpus_allowed masks? 628 */ 629 static int cpusets_overlap(struct cpuset *a, struct cpuset *b) 630 { 631 return cpumask_intersects(a->effective_cpus, b->effective_cpus); 632 } 633 634 static void 635 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 636 { 637 if (dattr->relax_domain_level < c->relax_domain_level) 638 dattr->relax_domain_level = c->relax_domain_level; 639 return; 640 } 641 642 static void update_domain_attr_tree(struct sched_domain_attr *dattr, 643 struct cpuset *root_cs) 644 { 645 struct cpuset *cp; 646 struct cgroup_subsys_state *pos_css; 647 648 rcu_read_lock(); 649 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { 650 /* skip the whole subtree if @cp doesn't have any CPU */ 651 if (cpumask_empty(cp->cpus_allowed)) { 652 pos_css = css_rightmost_descendant(pos_css); 653 continue; 654 } 655 656 if (is_sched_load_balance(cp)) 657 update_domain_attr(dattr, cp); 658 } 659 rcu_read_unlock(); 660 } 661 662 /* Must be called with cpuset_mutex held. */ 663 static inline int nr_cpusets(void) 664 { 665 /* jump label reference count + the top-level cpuset */ 666 return static_key_count(&cpusets_enabled_key.key) + 1; 667 } 668 669 /* 670 * generate_sched_domains() 671 * 672 * This function builds a partial partition of the systems CPUs 673 * A 'partial partition' is a set of non-overlapping subsets whose 674 * union is a subset of that set. 675 * The output of this function needs to be passed to kernel/sched/core.c 676 * partition_sched_domains() routine, which will rebuild the scheduler's 677 * load balancing domains (sched domains) as specified by that partial 678 * partition. 679 * 680 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst 681 * for a background explanation of this. 682 * 683 * Does not return errors, on the theory that the callers of this 684 * routine would rather not worry about failures to rebuild sched 685 * domains when operating in the severe memory shortage situations 686 * that could cause allocation failures below. 687 * 688 * Must be called with cpuset_mutex held. 689 * 690 * The three key local variables below are: 691 * cp - cpuset pointer, used (together with pos_css) to perform a 692 * top-down scan of all cpusets. For our purposes, rebuilding 693 * the schedulers sched domains, we can ignore !is_sched_load_ 694 * balance cpusets. 
695 * csa - (for CpuSet Array) Array of pointers to all the cpusets 696 * that need to be load balanced, for convenient iterative 697 * access by the subsequent code that finds the best partition, 698 * i.e the set of domains (subsets) of CPUs such that the 699 * cpus_allowed of every cpuset marked is_sched_load_balance 700 * is a subset of one of these domains, while there are as 701 * many such domains as possible, each as small as possible. 702 * doms - Conversion of 'csa' to an array of cpumasks, for passing to 703 * the kernel/sched/core.c routine partition_sched_domains() in a 704 * convenient format, that can be easily compared to the prior 705 * value to determine what partition elements (sched domains) 706 * were changed (added or removed.) 707 * 708 * Finding the best partition (set of domains): 709 * The triple nested loops below over i, j, k scan over the 710 * load balanced cpusets (using the array of cpuset pointers in 711 * csa[]) looking for pairs of cpusets that have overlapping 712 * cpus_allowed, but which don't have the same 'pn' partition 713 * number and gives them in the same partition number. It keeps 714 * looping on the 'restart' label until it can no longer find 715 * any such pairs. 716 * 717 * The union of the cpus_allowed masks from the set of 718 * all cpusets having the same 'pn' value then form the one 719 * element of the partition (one sched domain) to be passed to 720 * partition_sched_domains(). 721 */ 722 static int generate_sched_domains(cpumask_var_t **domains, 723 struct sched_domain_attr **attributes) 724 { 725 struct cpuset *cp; /* top-down scan of cpusets */ 726 struct cpuset **csa; /* array of all cpuset ptrs */ 727 int csn; /* how many cpuset ptrs in csa so far */ 728 int i, j, k; /* indices for partition finding loops */ 729 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ 730 struct sched_domain_attr *dattr; /* attributes for custom domains */ 731 int ndoms = 0; /* number of sched domains in result */ 732 int nslot; /* next empty doms[] struct cpumask slot */ 733 struct cgroup_subsys_state *pos_css; 734 bool root_load_balance = is_sched_load_balance(&top_cpuset); 735 736 doms = NULL; 737 dattr = NULL; 738 csa = NULL; 739 740 /* Special case for the 99% of systems with one, full, sched domain */ 741 if (root_load_balance && !top_cpuset.nr_subparts_cpus) { 742 ndoms = 1; 743 doms = alloc_sched_domains(ndoms); 744 if (!doms) 745 goto done; 746 747 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 748 if (dattr) { 749 *dattr = SD_ATTR_INIT; 750 update_domain_attr_tree(dattr, &top_cpuset); 751 } 752 cpumask_and(doms[0], top_cpuset.effective_cpus, 753 housekeeping_cpumask(HK_FLAG_DOMAIN)); 754 755 goto done; 756 } 757 758 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); 759 if (!csa) 760 goto done; 761 csn = 0; 762 763 rcu_read_lock(); 764 if (root_load_balance) 765 csa[csn++] = &top_cpuset; 766 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { 767 if (cp == &top_cpuset) 768 continue; 769 /* 770 * Continue traversing beyond @cp iff @cp has some CPUs and 771 * isn't load balancing. The former is obvious. The 772 * latter: All child cpusets contain a subset of the 773 * parent's cpus, so just skip them, and then we call 774 * update_domain_attr_tree() to calc relax_domain_level of 775 * the corresponding sched domain. 776 * 777 * If root is load-balancing, we can skip @cp if it 778 * is a subset of the root's effective_cpus. 
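		 *
		 * For example (hypothetical layout): with the root load
		 * balancing and a child partition root owning CPUs 4-7,
		 * that child is visited and added to csa[], while siblings
		 * whose CPUs all lie within the root's effective_cpus are
		 * skipped by the checks below.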
779 */ 780 if (!cpumask_empty(cp->cpus_allowed) && 781 !(is_sched_load_balance(cp) && 782 cpumask_intersects(cp->cpus_allowed, 783 housekeeping_cpumask(HK_FLAG_DOMAIN)))) 784 continue; 785 786 if (root_load_balance && 787 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) 788 continue; 789 790 if (is_sched_load_balance(cp)) 791 csa[csn++] = cp; 792 793 /* skip @cp's subtree if not a partition root */ 794 if (!is_partition_root(cp)) 795 pos_css = css_rightmost_descendant(pos_css); 796 } 797 rcu_read_unlock(); 798 799 for (i = 0; i < csn; i++) 800 csa[i]->pn = i; 801 ndoms = csn; 802 803 restart: 804 /* Find the best partition (set of sched domains) */ 805 for (i = 0; i < csn; i++) { 806 struct cpuset *a = csa[i]; 807 int apn = a->pn; 808 809 for (j = 0; j < csn; j++) { 810 struct cpuset *b = csa[j]; 811 int bpn = b->pn; 812 813 if (apn != bpn && cpusets_overlap(a, b)) { 814 for (k = 0; k < csn; k++) { 815 struct cpuset *c = csa[k]; 816 817 if (c->pn == bpn) 818 c->pn = apn; 819 } 820 ndoms--; /* one less element */ 821 goto restart; 822 } 823 } 824 } 825 826 /* 827 * Now we know how many domains to create. 828 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 829 */ 830 doms = alloc_sched_domains(ndoms); 831 if (!doms) 832 goto done; 833 834 /* 835 * The rest of the code, including the scheduler, can deal with 836 * dattr==NULL case. No need to abort if alloc fails. 837 */ 838 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), 839 GFP_KERNEL); 840 841 for (nslot = 0, i = 0; i < csn; i++) { 842 struct cpuset *a = csa[i]; 843 struct cpumask *dp; 844 int apn = a->pn; 845 846 if (apn < 0) { 847 /* Skip completed partitions */ 848 continue; 849 } 850 851 dp = doms[nslot]; 852 853 if (nslot == ndoms) { 854 static int warnings = 10; 855 if (warnings) { 856 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n", 857 nslot, ndoms, csn, i, apn); 858 warnings--; 859 } 860 continue; 861 } 862 863 cpumask_clear(dp); 864 if (dattr) 865 *(dattr + nslot) = SD_ATTR_INIT; 866 for (j = i; j < csn; j++) { 867 struct cpuset *b = csa[j]; 868 869 if (apn == b->pn) { 870 cpumask_or(dp, dp, b->effective_cpus); 871 cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN)); 872 if (dattr) 873 update_domain_attr_tree(dattr + nslot, b); 874 875 /* Done with this partition */ 876 b->pn = -1; 877 } 878 } 879 nslot++; 880 } 881 BUG_ON(nslot != ndoms); 882 883 done: 884 kfree(csa); 885 886 /* 887 * Fallback to the default domain if kmalloc() failed. 888 * See comments in partition_sched_domains(). 889 */ 890 if (doms == NULL) 891 ndoms = 1; 892 893 *domains = doms; 894 *attributes = dattr; 895 return ndoms; 896 } 897 898 static void update_tasks_root_domain(struct cpuset *cs) 899 { 900 struct css_task_iter it; 901 struct task_struct *task; 902 903 css_task_iter_start(&cs->css, 0, &it); 904 905 while ((task = css_task_iter_next(&it))) 906 dl_add_task_root_domain(task); 907 908 css_task_iter_end(&it); 909 } 910 911 static void rebuild_root_domains(void) 912 { 913 struct cpuset *cs = NULL; 914 struct cgroup_subsys_state *pos_css; 915 916 lockdep_assert_held(&cpuset_mutex); 917 lockdep_assert_cpus_held(); 918 lockdep_assert_held(&sched_domains_mutex); 919 920 cgroup_enable_task_cg_lists(); 921 922 rcu_read_lock(); 923 924 /* 925 * Clear default root domain DL accounting, it will be computed again 926 * if a task belongs to it. 
927 */ 928 dl_clear_root_domain(&def_root_domain); 929 930 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 931 932 if (cpumask_empty(cs->effective_cpus)) { 933 pos_css = css_rightmost_descendant(pos_css); 934 continue; 935 } 936 937 css_get(&cs->css); 938 939 rcu_read_unlock(); 940 941 update_tasks_root_domain(cs); 942 943 rcu_read_lock(); 944 css_put(&cs->css); 945 } 946 rcu_read_unlock(); 947 } 948 949 static void 950 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], 951 struct sched_domain_attr *dattr_new) 952 { 953 mutex_lock(&sched_domains_mutex); 954 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); 955 rebuild_root_domains(); 956 mutex_unlock(&sched_domains_mutex); 957 } 958 959 /* 960 * Rebuild scheduler domains. 961 * 962 * If the flag 'sched_load_balance' of any cpuset with non-empty 963 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset 964 * which has that flag enabled, or if any cpuset with a non-empty 965 * 'cpus' is removed, then call this routine to rebuild the 966 * scheduler's dynamic sched domains. 967 * 968 * Call with cpuset_mutex held. Takes get_online_cpus(). 969 */ 970 static void rebuild_sched_domains_locked(void) 971 { 972 struct sched_domain_attr *attr; 973 cpumask_var_t *doms; 974 int ndoms; 975 976 lockdep_assert_held(&cpuset_mutex); 977 get_online_cpus(); 978 979 /* 980 * We have raced with CPU hotplug. Don't do anything to avoid 981 * passing doms with offlined cpu to partition_sched_domains(). 982 * Anyways, hotplug work item will rebuild sched domains. 983 */ 984 if (!top_cpuset.nr_subparts_cpus && 985 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) 986 goto out; 987 988 if (top_cpuset.nr_subparts_cpus && 989 !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask)) 990 goto out; 991 992 /* Generate domain masks and attrs */ 993 ndoms = generate_sched_domains(&doms, &attr); 994 995 /* Have scheduler rebuild the domains */ 996 partition_and_rebuild_sched_domains(ndoms, doms, attr); 997 out: 998 put_online_cpus(); 999 } 1000 #else /* !CONFIG_SMP */ 1001 static void rebuild_sched_domains_locked(void) 1002 { 1003 } 1004 #endif /* CONFIG_SMP */ 1005 1006 void rebuild_sched_domains(void) 1007 { 1008 mutex_lock(&cpuset_mutex); 1009 rebuild_sched_domains_locked(); 1010 mutex_unlock(&cpuset_mutex); 1011 } 1012 1013 /** 1014 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. 1015 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed 1016 * 1017 * Iterate through each task of @cs updating its cpus_allowed to the 1018 * effective cpuset's. As this function is called with cpuset_mutex held, 1019 * cpuset membership stays stable. 1020 */ 1021 static void update_tasks_cpumask(struct cpuset *cs) 1022 { 1023 struct css_task_iter it; 1024 struct task_struct *task; 1025 1026 css_task_iter_start(&cs->css, 0, &it); 1027 while ((task = css_task_iter_next(&it))) 1028 set_cpus_allowed_ptr(task, cs->effective_cpus); 1029 css_task_iter_end(&it); 1030 } 1031 1032 /** 1033 * compute_effective_cpumask - Compute the effective cpumask of the cpuset 1034 * @new_cpus: the temp variable for the new effective_cpus mask 1035 * @cs: the cpuset the need to recompute the new effective_cpus mask 1036 * @parent: the parent cpuset 1037 * 1038 * If the parent has subpartition CPUs, include them in the list of 1039 * allowable CPUs in computing the new effective_cpus mask. 
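 * (Roughly, in mask terms: new_cpus = cpus_allowed &
 * (parent->effective_cpus | parent->subparts_cpus), as sketched in the
 * function body below.)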
Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root. The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus. The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus. 0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask. Otherwise,
 * cpus_allowed is assumed to remain the same. The cpuset should either
 * be a partition root or an invalid partition root. The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent. The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * Error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate(). The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change(). The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
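 *
 * As a worked example of partcmd_update with a newmask (hypothetical
 * masks): if cpus_allowed is 0-3, newmask is 2-5 and CPUs 0-3 currently
 * sit in the parent's subparts_cpus, then delmask becomes 0-1 (returned
 * to the parent's effective_cpus) and addmask becomes 4-5, provided those
 * CPUs are still in the parent's effective_cpus and not already in its
 * subparts_cpus.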
1106 */ 1107 static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, 1108 struct cpumask *newmask, 1109 struct tmpmasks *tmp) 1110 { 1111 struct cpuset *parent = parent_cs(cpuset); 1112 int adding; /* Moving cpus from effective_cpus to subparts_cpus */ 1113 int deleting; /* Moving cpus from subparts_cpus to effective_cpus */ 1114 bool part_error = false; /* Partition error? */ 1115 1116 lockdep_assert_held(&cpuset_mutex); 1117 1118 /* 1119 * The parent must be a partition root. 1120 * The new cpumask, if present, or the current cpus_allowed must 1121 * not be empty. 1122 */ 1123 if (!is_partition_root(parent) || 1124 (newmask && cpumask_empty(newmask)) || 1125 (!newmask && cpumask_empty(cpuset->cpus_allowed))) 1126 return -EINVAL; 1127 1128 /* 1129 * Enabling/disabling partition root is not allowed if there are 1130 * online children. 1131 */ 1132 if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css)) 1133 return -EBUSY; 1134 1135 /* 1136 * Enabling partition root is not allowed if not all the CPUs 1137 * can be granted from parent's effective_cpus or at least one 1138 * CPU will be left after that. 1139 */ 1140 if ((cmd == partcmd_enable) && 1141 (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) || 1142 cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus))) 1143 return -EINVAL; 1144 1145 /* 1146 * A cpumask update cannot make parent's effective_cpus become empty. 1147 */ 1148 adding = deleting = false; 1149 if (cmd == partcmd_enable) { 1150 cpumask_copy(tmp->addmask, cpuset->cpus_allowed); 1151 adding = true; 1152 } else if (cmd == partcmd_disable) { 1153 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, 1154 parent->subparts_cpus); 1155 } else if (newmask) { 1156 /* 1157 * partcmd_update with newmask: 1158 * 1159 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus 1160 * addmask = newmask & parent->effective_cpus 1161 * & ~parent->subparts_cpus 1162 */ 1163 cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask); 1164 deleting = cpumask_and(tmp->delmask, tmp->delmask, 1165 parent->subparts_cpus); 1166 1167 cpumask_and(tmp->addmask, newmask, parent->effective_cpus); 1168 adding = cpumask_andnot(tmp->addmask, tmp->addmask, 1169 parent->subparts_cpus); 1170 /* 1171 * Return error if the new effective_cpus could become empty. 1172 */ 1173 if (adding && 1174 cpumask_equal(parent->effective_cpus, tmp->addmask)) { 1175 if (!deleting) 1176 return -EINVAL; 1177 /* 1178 * As some of the CPUs in subparts_cpus might have 1179 * been offlined, we need to compute the real delmask 1180 * to confirm that. 1181 */ 1182 if (!cpumask_and(tmp->addmask, tmp->delmask, 1183 cpu_active_mask)) 1184 return -EINVAL; 1185 cpumask_copy(tmp->addmask, parent->effective_cpus); 1186 } 1187 } else { 1188 /* 1189 * partcmd_update w/o newmask: 1190 * 1191 * addmask = cpus_allowed & parent->effectiveb_cpus 1192 * 1193 * Note that parent's subparts_cpus may have been 1194 * pre-shrunk in case there is a change in the cpu list. 1195 * So no deletion is needed. 1196 */ 1197 adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed, 1198 parent->effective_cpus); 1199 part_error = cpumask_equal(tmp->addmask, 1200 parent->effective_cpus); 1201 } 1202 1203 if (cmd == partcmd_update) { 1204 int prev_prs = cpuset->partition_root_state; 1205 1206 /* 1207 * Check for possible transition between PRS_ENABLED 1208 * and PRS_ERROR. 
1209 */ 1210 switch (cpuset->partition_root_state) { 1211 case PRS_ENABLED: 1212 if (part_error) 1213 cpuset->partition_root_state = PRS_ERROR; 1214 break; 1215 case PRS_ERROR: 1216 if (!part_error) 1217 cpuset->partition_root_state = PRS_ENABLED; 1218 break; 1219 } 1220 /* 1221 * Set part_error if previously in invalid state. 1222 */ 1223 part_error = (prev_prs == PRS_ERROR); 1224 } 1225 1226 if (!part_error && (cpuset->partition_root_state == PRS_ERROR)) 1227 return 0; /* Nothing need to be done */ 1228 1229 if (cpuset->partition_root_state == PRS_ERROR) { 1230 /* 1231 * Remove all its cpus from parent's subparts_cpus. 1232 */ 1233 adding = false; 1234 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, 1235 parent->subparts_cpus); 1236 } 1237 1238 if (!adding && !deleting) 1239 return 0; 1240 1241 /* 1242 * Change the parent's subparts_cpus. 1243 * Newly added CPUs will be removed from effective_cpus and 1244 * newly deleted ones will be added back to effective_cpus. 1245 */ 1246 spin_lock_irq(&callback_lock); 1247 if (adding) { 1248 cpumask_or(parent->subparts_cpus, 1249 parent->subparts_cpus, tmp->addmask); 1250 cpumask_andnot(parent->effective_cpus, 1251 parent->effective_cpus, tmp->addmask); 1252 } 1253 if (deleting) { 1254 cpumask_andnot(parent->subparts_cpus, 1255 parent->subparts_cpus, tmp->delmask); 1256 /* 1257 * Some of the CPUs in subparts_cpus might have been offlined. 1258 */ 1259 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); 1260 cpumask_or(parent->effective_cpus, 1261 parent->effective_cpus, tmp->delmask); 1262 } 1263 1264 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); 1265 spin_unlock_irq(&callback_lock); 1266 1267 return cmd == partcmd_update; 1268 } 1269 1270 /* 1271 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree 1272 * @cs: the cpuset to consider 1273 * @tmp: temp variables for calculating effective_cpus & partition setup 1274 * 1275 * When congifured cpumask is changed, the effective cpumasks of this cpuset 1276 * and all its descendants need to be updated. 1277 * 1278 * On legacy hierachy, effective_cpus will be the same with cpu_allowed. 1279 * 1280 * Called with cpuset_mutex held 1281 */ 1282 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) 1283 { 1284 struct cpuset *cp; 1285 struct cgroup_subsys_state *pos_css; 1286 bool need_rebuild_sched_domains = false; 1287 1288 rcu_read_lock(); 1289 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 1290 struct cpuset *parent = parent_cs(cp); 1291 1292 compute_effective_cpumask(tmp->new_cpus, cp, parent); 1293 1294 /* 1295 * If it becomes empty, inherit the effective mask of the 1296 * parent, which is guaranteed to have some CPUs. 1297 */ 1298 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { 1299 cpumask_copy(tmp->new_cpus, parent->effective_cpus); 1300 if (!cp->use_parent_ecpus) { 1301 cp->use_parent_ecpus = true; 1302 parent->child_ecpus_count++; 1303 } 1304 } else if (cp->use_parent_ecpus) { 1305 cp->use_parent_ecpus = false; 1306 WARN_ON_ONCE(!parent->child_ecpus_count); 1307 parent->child_ecpus_count--; 1308 } 1309 1310 /* 1311 * Skip the whole subtree if the cpumask remains the same 1312 * and has no partition root state. 1313 */ 1314 if (!cp->partition_root_state && 1315 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { 1316 pos_css = css_rightmost_descendant(pos_css); 1317 continue; 1318 } 1319 1320 /* 1321 * update_parent_subparts_cpumask() should have been called 1322 * for cs already in update_cpumask(). 
We should also call 1323 * update_tasks_cpumask() again for tasks in the parent 1324 * cpuset if the parent's subparts_cpus changes. 1325 */ 1326 if ((cp != cs) && cp->partition_root_state) { 1327 switch (parent->partition_root_state) { 1328 case PRS_DISABLED: 1329 /* 1330 * If parent is not a partition root or an 1331 * invalid partition root, clear the state 1332 * state and the CS_CPU_EXCLUSIVE flag. 1333 */ 1334 WARN_ON_ONCE(cp->partition_root_state 1335 != PRS_ERROR); 1336 cp->partition_root_state = 0; 1337 1338 /* 1339 * clear_bit() is an atomic operation and 1340 * readers aren't interested in the state 1341 * of CS_CPU_EXCLUSIVE anyway. So we can 1342 * just update the flag without holding 1343 * the callback_lock. 1344 */ 1345 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); 1346 break; 1347 1348 case PRS_ENABLED: 1349 if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) 1350 update_tasks_cpumask(parent); 1351 break; 1352 1353 case PRS_ERROR: 1354 /* 1355 * When parent is invalid, it has to be too. 1356 */ 1357 cp->partition_root_state = PRS_ERROR; 1358 if (cp->nr_subparts_cpus) { 1359 cp->nr_subparts_cpus = 0; 1360 cpumask_clear(cp->subparts_cpus); 1361 } 1362 break; 1363 } 1364 } 1365 1366 if (!css_tryget_online(&cp->css)) 1367 continue; 1368 rcu_read_unlock(); 1369 1370 spin_lock_irq(&callback_lock); 1371 1372 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 1373 if (cp->nr_subparts_cpus && 1374 (cp->partition_root_state != PRS_ENABLED)) { 1375 cp->nr_subparts_cpus = 0; 1376 cpumask_clear(cp->subparts_cpus); 1377 } else if (cp->nr_subparts_cpus) { 1378 /* 1379 * Make sure that effective_cpus & subparts_cpus 1380 * are mutually exclusive. 1381 * 1382 * In the unlikely event that effective_cpus 1383 * becomes empty. we clear cp->nr_subparts_cpus and 1384 * let its child partition roots to compete for 1385 * CPUs again. 1386 */ 1387 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, 1388 cp->subparts_cpus); 1389 if (cpumask_empty(cp->effective_cpus)) { 1390 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 1391 cpumask_clear(cp->subparts_cpus); 1392 cp->nr_subparts_cpus = 0; 1393 } else if (!cpumask_subset(cp->subparts_cpus, 1394 tmp->new_cpus)) { 1395 cpumask_andnot(cp->subparts_cpus, 1396 cp->subparts_cpus, tmp->new_cpus); 1397 cp->nr_subparts_cpus 1398 = cpumask_weight(cp->subparts_cpus); 1399 } 1400 } 1401 spin_unlock_irq(&callback_lock); 1402 1403 WARN_ON(!is_in_v2_mode() && 1404 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); 1405 1406 update_tasks_cpumask(cp); 1407 1408 /* 1409 * On legacy hierarchy, if the effective cpumask of any non- 1410 * empty cpuset is changed, we need to rebuild sched domains. 1411 * On default hierarchy, the cpuset needs to be a partition 1412 * root as well. 
1413 */ 1414 if (!cpumask_empty(cp->cpus_allowed) && 1415 is_sched_load_balance(cp) && 1416 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 1417 is_partition_root(cp))) 1418 need_rebuild_sched_domains = true; 1419 1420 rcu_read_lock(); 1421 css_put(&cp->css); 1422 } 1423 rcu_read_unlock(); 1424 1425 if (need_rebuild_sched_domains) 1426 rebuild_sched_domains_locked(); 1427 } 1428 1429 /** 1430 * update_sibling_cpumasks - Update siblings cpumasks 1431 * @parent: Parent cpuset 1432 * @cs: Current cpuset 1433 * @tmp: Temp variables 1434 */ 1435 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 1436 struct tmpmasks *tmp) 1437 { 1438 struct cpuset *sibling; 1439 struct cgroup_subsys_state *pos_css; 1440 1441 /* 1442 * Check all its siblings and call update_cpumasks_hier() 1443 * if their use_parent_ecpus flag is set in order for them 1444 * to use the right effective_cpus value. 1445 */ 1446 rcu_read_lock(); 1447 cpuset_for_each_child(sibling, pos_css, parent) { 1448 if (sibling == cs) 1449 continue; 1450 if (!sibling->use_parent_ecpus) 1451 continue; 1452 1453 update_cpumasks_hier(sibling, tmp); 1454 } 1455 rcu_read_unlock(); 1456 } 1457 1458 /** 1459 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 1460 * @cs: the cpuset to consider 1461 * @trialcs: trial cpuset 1462 * @buf: buffer of cpu numbers written to this cpuset 1463 */ 1464 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, 1465 const char *buf) 1466 { 1467 int retval; 1468 struct tmpmasks tmp; 1469 1470 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ 1471 if (cs == &top_cpuset) 1472 return -EACCES; 1473 1474 /* 1475 * An empty cpus_allowed is ok only if the cpuset has no tasks. 1476 * Since cpulist_parse() fails on an empty mask, we special case 1477 * that parsing. The validate_change() call ensures that cpusets 1478 * with tasks have cpus. 1479 */ 1480 if (!*buf) { 1481 cpumask_clear(trialcs->cpus_allowed); 1482 } else { 1483 retval = cpulist_parse(buf, trialcs->cpus_allowed); 1484 if (retval < 0) 1485 return retval; 1486 1487 if (!cpumask_subset(trialcs->cpus_allowed, 1488 top_cpuset.cpus_allowed)) 1489 return -EINVAL; 1490 } 1491 1492 /* Nothing to do if the cpus didn't change */ 1493 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) 1494 return 0; 1495 1496 retval = validate_change(cs, trialcs); 1497 if (retval < 0) 1498 return retval; 1499 1500 #ifdef CONFIG_CPUMASK_OFFSTACK 1501 /* 1502 * Use the cpumasks in trialcs for tmpmasks when they are pointers 1503 * to allocated cpumasks. 1504 */ 1505 tmp.addmask = trialcs->subparts_cpus; 1506 tmp.delmask = trialcs->effective_cpus; 1507 tmp.new_cpus = trialcs->cpus_allowed; 1508 #endif 1509 1510 if (cs->partition_root_state) { 1511 /* Cpumask of a partition root cannot be empty */ 1512 if (cpumask_empty(trialcs->cpus_allowed)) 1513 return -EINVAL; 1514 if (update_parent_subparts_cpumask(cs, partcmd_update, 1515 trialcs->cpus_allowed, &tmp) < 0) 1516 return -EINVAL; 1517 } 1518 1519 spin_lock_irq(&callback_lock); 1520 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); 1521 1522 /* 1523 * Make sure that subparts_cpus is a subset of cpus_allowed. 
1524 */ 1525 if (cs->nr_subparts_cpus) { 1526 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, 1527 cs->cpus_allowed); 1528 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); 1529 } 1530 spin_unlock_irq(&callback_lock); 1531 1532 update_cpumasks_hier(cs, &tmp); 1533 1534 if (cs->partition_root_state) { 1535 struct cpuset *parent = parent_cs(cs); 1536 1537 /* 1538 * For partition root, update the cpumasks of sibling 1539 * cpusets if they use parent's effective_cpus. 1540 */ 1541 if (parent->child_ecpus_count) 1542 update_sibling_cpumasks(parent, cs, &tmp); 1543 } 1544 return 0; 1545 } 1546 1547 /* 1548 * Migrate memory region from one set of nodes to another. This is 1549 * performed asynchronously as it can be called from process migration path 1550 * holding locks involved in process management. All mm migrations are 1551 * performed in the queued order and can be waited for by flushing 1552 * cpuset_migrate_mm_wq. 1553 */ 1554 1555 struct cpuset_migrate_mm_work { 1556 struct work_struct work; 1557 struct mm_struct *mm; 1558 nodemask_t from; 1559 nodemask_t to; 1560 }; 1561 1562 static void cpuset_migrate_mm_workfn(struct work_struct *work) 1563 { 1564 struct cpuset_migrate_mm_work *mwork = 1565 container_of(work, struct cpuset_migrate_mm_work, work); 1566 1567 /* on a wq worker, no need to worry about %current's mems_allowed */ 1568 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); 1569 mmput(mwork->mm); 1570 kfree(mwork); 1571 } 1572 1573 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, 1574 const nodemask_t *to) 1575 { 1576 struct cpuset_migrate_mm_work *mwork; 1577 1578 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); 1579 if (mwork) { 1580 mwork->mm = mm; 1581 mwork->from = *from; 1582 mwork->to = *to; 1583 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); 1584 queue_work(cpuset_migrate_mm_wq, &mwork->work); 1585 } else { 1586 mmput(mm); 1587 } 1588 } 1589 1590 static void cpuset_post_attach(void) 1591 { 1592 flush_workqueue(cpuset_migrate_mm_wq); 1593 } 1594 1595 /* 1596 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy 1597 * @tsk: the task to change 1598 * @newmems: new nodes that the task will be set 1599 * 1600 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed 1601 * and rebind an eventual tasks' mempolicy. If the task is allocating in 1602 * parallel, it might temporarily see an empty intersection, which results in 1603 * a seqlock check and retry before OOM or allocation failure. 1604 */ 1605 static void cpuset_change_task_nodemask(struct task_struct *tsk, 1606 nodemask_t *newmems) 1607 { 1608 task_lock(tsk); 1609 1610 local_irq_disable(); 1611 write_seqcount_begin(&tsk->mems_allowed_seq); 1612 1613 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); 1614 mpol_rebind_task(tsk, newmems); 1615 tsk->mems_allowed = *newmems; 1616 1617 write_seqcount_end(&tsk->mems_allowed_seq); 1618 local_irq_enable(); 1619 1620 task_unlock(tsk); 1621 } 1622 1623 static void *cpuset_being_rebound; 1624 1625 /** 1626 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. 1627 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed 1628 * 1629 * Iterate through each task of @cs updating its mems_allowed to the 1630 * effective cpuset's. As this function is called with cpuset_mutex held, 1631 * cpuset membership stays stable. 
1632 */ 1633 static void update_tasks_nodemask(struct cpuset *cs) 1634 { 1635 static nodemask_t newmems; /* protected by cpuset_mutex */ 1636 struct css_task_iter it; 1637 struct task_struct *task; 1638 1639 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 1640 1641 guarantee_online_mems(cs, &newmems); 1642 1643 /* 1644 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't 1645 * take while holding tasklist_lock. Forks can happen - the 1646 * mpol_dup() cpuset_being_rebound check will catch such forks, 1647 * and rebind their vma mempolicies too. Because we still hold 1648 * the global cpuset_mutex, we know that no other rebind effort 1649 * will be contending for the global variable cpuset_being_rebound. 1650 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 1651 * is idempotent. Also migrate pages in each mm to new nodes. 1652 */ 1653 css_task_iter_start(&cs->css, 0, &it); 1654 while ((task = css_task_iter_next(&it))) { 1655 struct mm_struct *mm; 1656 bool migrate; 1657 1658 cpuset_change_task_nodemask(task, &newmems); 1659 1660 mm = get_task_mm(task); 1661 if (!mm) 1662 continue; 1663 1664 migrate = is_memory_migrate(cs); 1665 1666 mpol_rebind_mm(mm, &cs->mems_allowed); 1667 if (migrate) 1668 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); 1669 else 1670 mmput(mm); 1671 } 1672 css_task_iter_end(&it); 1673 1674 /* 1675 * All the tasks' nodemasks have been updated, update 1676 * cs->old_mems_allowed. 1677 */ 1678 cs->old_mems_allowed = newmems; 1679 1680 /* We're done rebinding vmas to this cpuset's new mems_allowed. */ 1681 cpuset_being_rebound = NULL; 1682 } 1683 1684 /* 1685 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree 1686 * @cs: the cpuset to consider 1687 * @new_mems: a temp variable for calculating new effective_mems 1688 * 1689 * When configured nodemask is changed, the effective nodemasks of this cpuset 1690 * and all its descendants need to be updated. 1691 * 1692 * On legacy hiearchy, effective_mems will be the same with mems_allowed. 1693 * 1694 * Called with cpuset_mutex held 1695 */ 1696 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) 1697 { 1698 struct cpuset *cp; 1699 struct cgroup_subsys_state *pos_css; 1700 1701 rcu_read_lock(); 1702 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 1703 struct cpuset *parent = parent_cs(cp); 1704 1705 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); 1706 1707 /* 1708 * If it becomes empty, inherit the effective mask of the 1709 * parent, which is guaranteed to have some MEMs. 1710 */ 1711 if (is_in_v2_mode() && nodes_empty(*new_mems)) 1712 *new_mems = parent->effective_mems; 1713 1714 /* Skip the whole subtree if the nodemask remains the same. */ 1715 if (nodes_equal(*new_mems, cp->effective_mems)) { 1716 pos_css = css_rightmost_descendant(pos_css); 1717 continue; 1718 } 1719 1720 if (!css_tryget_online(&cp->css)) 1721 continue; 1722 rcu_read_unlock(); 1723 1724 spin_lock_irq(&callback_lock); 1725 cp->effective_mems = *new_mems; 1726 spin_unlock_irq(&callback_lock); 1727 1728 WARN_ON(!is_in_v2_mode() && 1729 !nodes_equal(cp->mems_allowed, cp->effective_mems)); 1730 1731 update_tasks_nodemask(cp); 1732 1733 rcu_read_lock(); 1734 css_put(&cp->css); 1735 } 1736 rcu_read_unlock(); 1737 } 1738 1739 /* 1740 * Handle user request to change the 'mems' memory placement 1741 * of a cpuset. 
Needs to validate the request, update the 1742 * cpusets mems_allowed, and for each task in the cpuset, 1743 * update mems_allowed and rebind task's mempolicy and any vma 1744 * mempolicies and if the cpuset is marked 'memory_migrate', 1745 * migrate the tasks pages to the new memory. 1746 * 1747 * Call with cpuset_mutex held. May take callback_lock during call. 1748 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 1749 * lock each such tasks mm->mmap_sem, scan its vma's and rebind 1750 * their mempolicies to the cpusets new mems_allowed. 1751 */ 1752 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, 1753 const char *buf) 1754 { 1755 int retval; 1756 1757 /* 1758 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; 1759 * it's read-only 1760 */ 1761 if (cs == &top_cpuset) { 1762 retval = -EACCES; 1763 goto done; 1764 } 1765 1766 /* 1767 * An empty mems_allowed is ok iff there are no tasks in the cpuset. 1768 * Since nodelist_parse() fails on an empty mask, we special case 1769 * that parsing. The validate_change() call ensures that cpusets 1770 * with tasks have memory. 1771 */ 1772 if (!*buf) { 1773 nodes_clear(trialcs->mems_allowed); 1774 } else { 1775 retval = nodelist_parse(buf, trialcs->mems_allowed); 1776 if (retval < 0) 1777 goto done; 1778 1779 if (!nodes_subset(trialcs->mems_allowed, 1780 top_cpuset.mems_allowed)) { 1781 retval = -EINVAL; 1782 goto done; 1783 } 1784 } 1785 1786 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { 1787 retval = 0; /* Too easy - nothing to do */ 1788 goto done; 1789 } 1790 retval = validate_change(cs, trialcs); 1791 if (retval < 0) 1792 goto done; 1793 1794 spin_lock_irq(&callback_lock); 1795 cs->mems_allowed = trialcs->mems_allowed; 1796 spin_unlock_irq(&callback_lock); 1797 1798 /* use trialcs->mems_allowed as a temp variable */ 1799 update_nodemasks_hier(cs, &trialcs->mems_allowed); 1800 done: 1801 return retval; 1802 } 1803 1804 bool current_cpuset_is_being_rebound(void) 1805 { 1806 bool ret; 1807 1808 rcu_read_lock(); 1809 ret = task_cs(current) == cpuset_being_rebound; 1810 rcu_read_unlock(); 1811 1812 return ret; 1813 } 1814 1815 static int update_relax_domain_level(struct cpuset *cs, s64 val) 1816 { 1817 #ifdef CONFIG_SMP 1818 if (val < -1 || val >= sched_domain_level_max) 1819 return -EINVAL; 1820 #endif 1821 1822 if (val != cs->relax_domain_level) { 1823 cs->relax_domain_level = val; 1824 if (!cpumask_empty(cs->cpus_allowed) && 1825 is_sched_load_balance(cs)) 1826 rebuild_sched_domains_locked(); 1827 } 1828 1829 return 0; 1830 } 1831 1832 /** 1833 * update_tasks_flags - update the spread flags of tasks in the cpuset. 1834 * @cs: the cpuset in which each task's spread flags needs to be changed 1835 * 1836 * Iterate through each task of @cs updating its spread flags. As this 1837 * function is called with cpuset_mutex held, cpuset membership stays 1838 * stable. 1839 */ 1840 static void update_tasks_flags(struct cpuset *cs) 1841 { 1842 struct css_task_iter it; 1843 struct task_struct *task; 1844 1845 css_task_iter_start(&cs->css, 0, &it); 1846 while ((task = css_task_iter_next(&it))) 1847 cpuset_update_task_spread_flag(cs, task); 1848 css_task_iter_end(&it); 1849 } 1850 1851 /* 1852 * update_flag - read a 0 or a 1 in a file and update associated flag 1853 * bit: the bit to update (see cpuset_flagbits_t) 1854 * cs: the cpuset to update 1855 * turning_on: whether the flag is being set or cleared 1856 * 1857 * Call with cpuset_mutex held. 
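 *
 * For example, update_flag(CS_MEM_HARDWALL, cs, 1) is effectively what a
 * write of "1" to the cpuset's mem_hardwall control file ends up doing.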
1858 */ 1859 1860 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1861 int turning_on) 1862 { 1863 struct cpuset *trialcs; 1864 int balance_flag_changed; 1865 int spread_flag_changed; 1866 int err; 1867 1868 trialcs = alloc_trial_cpuset(cs); 1869 if (!trialcs) 1870 return -ENOMEM; 1871 1872 if (turning_on) 1873 set_bit(bit, &trialcs->flags); 1874 else 1875 clear_bit(bit, &trialcs->flags); 1876 1877 err = validate_change(cs, trialcs); 1878 if (err < 0) 1879 goto out; 1880 1881 balance_flag_changed = (is_sched_load_balance(cs) != 1882 is_sched_load_balance(trialcs)); 1883 1884 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 1885 || (is_spread_page(cs) != is_spread_page(trialcs))); 1886 1887 spin_lock_irq(&callback_lock); 1888 cs->flags = trialcs->flags; 1889 spin_unlock_irq(&callback_lock); 1890 1891 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1892 rebuild_sched_domains_locked(); 1893 1894 if (spread_flag_changed) 1895 update_tasks_flags(cs); 1896 out: 1897 free_cpuset(trialcs); 1898 return err; 1899 } 1900 1901 /* 1902 * update_prstate - update partititon_root_state 1903 * cs: the cpuset to update 1904 * val: 0 - disabled, 1 - enabled 1905 * 1906 * Call with cpuset_mutex held. 1907 */ 1908 static int update_prstate(struct cpuset *cs, int val) 1909 { 1910 int err; 1911 struct cpuset *parent = parent_cs(cs); 1912 struct tmpmasks tmp; 1913 1914 if ((val != 0) && (val != 1)) 1915 return -EINVAL; 1916 if (val == cs->partition_root_state) 1917 return 0; 1918 1919 /* 1920 * Cannot force a partial or invalid partition root to a full 1921 * partition root. 1922 */ 1923 if (val && cs->partition_root_state) 1924 return -EINVAL; 1925 1926 if (alloc_cpumasks(NULL, &tmp)) 1927 return -ENOMEM; 1928 1929 err = -EINVAL; 1930 if (!cs->partition_root_state) { 1931 /* 1932 * Turning on partition root requires setting the 1933 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed 1934 * cannot be NULL. 1935 */ 1936 if (cpumask_empty(cs->cpus_allowed)) 1937 goto out; 1938 1939 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); 1940 if (err) 1941 goto out; 1942 1943 err = update_parent_subparts_cpumask(cs, partcmd_enable, 1944 NULL, &tmp); 1945 if (err) { 1946 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1947 goto out; 1948 } 1949 cs->partition_root_state = PRS_ENABLED; 1950 } else { 1951 /* 1952 * Turning off partition root will clear the 1953 * CS_CPU_EXCLUSIVE bit. 1954 */ 1955 if (cs->partition_root_state == PRS_ERROR) { 1956 cs->partition_root_state = 0; 1957 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1958 err = 0; 1959 goto out; 1960 } 1961 1962 err = update_parent_subparts_cpumask(cs, partcmd_disable, 1963 NULL, &tmp); 1964 if (err) 1965 goto out; 1966 1967 cs->partition_root_state = 0; 1968 1969 /* Turning off CS_CPU_EXCLUSIVE will not return error */ 1970 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1971 } 1972 1973 /* 1974 * Update cpumask of parent's tasks except when it is the top 1975 * cpuset as some system daemons cannot be mapped to other CPUs. 1976 */ 1977 if (parent != &top_cpuset) 1978 update_tasks_cpumask(parent); 1979 1980 if (parent->child_ecpus_count) 1981 update_sibling_cpumasks(parent, cs, &tmp); 1982 1983 rebuild_sched_domains_locked(); 1984 out: 1985 free_cpumasks(NULL, &tmp); 1986 return err; 1987 } 1988 1989 /* 1990 * Frequency meter - How fast is some event occurring? 1991 * 1992 * These routines manage a digitally filtered, constant time based, 1993 * event frequency meter. 
There are four routines: 1994 * fmeter_init() - initialize a frequency meter. 1995 * fmeter_markevent() - called each time the event happens. 1996 * fmeter_getrate() - returns the recent rate of such events. 1997 * fmeter_update() - internal routine used to update fmeter. 1998 * 1999 * A common data structure is passed to each of these routines, 2000 * which is used to keep track of the state required to manage the 2001 * frequency meter and its digital filter. 2002 * 2003 * The filter works on the number of events marked per unit time. 2004 * The filter is single-pole low-pass recursive (IIR). The time unit 2005 * is 1 second. Arithmetic is done using 32-bit integers scaled to 2006 * simulate 3 decimal digits of precision (multiplied by 1000). 2007 * 2008 * With an FM_COEF of 933, and a time base of 1 second, the filter 2009 * has a half-life of 10 seconds, meaning that if the events quit 2010 * happening, then the rate returned from the fmeter_getrate() 2011 * will be cut in half each 10 seconds, until it converges to zero. 2012 * 2013 * It is not worth doing a real infinitely recursive filter. If more 2014 * than FM_MAXTICKS ticks have elapsed since the last filter event, 2015 * just compute FM_MAXTICKS ticks worth, by which point the level 2016 * will be stable. 2017 * 2018 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 2019 * arithmetic overflow in the fmeter_update() routine. 2020 * 2021 * Given the simple 32 bit integer arithmetic used, this meter works 2022 * best for reporting rates between one per millisecond (msec) and 2023 * one per 32 (approx) seconds. At constant rates faster than one 2024 * per msec it maxes out at values just under 1,000,000. At constant 2025 * rates between one per msec, and one per second it will stabilize 2026 * to a value N*1000, where N is the rate of events per second. 2027 * At constant rates between one per second and one per 32 seconds, 2028 * it will be choppy, moving up on the seconds that have an event, 2029 * and then decaying until the next event. At rates slower than 2030 * about one in 32 seconds, it decays all the way back to zero between 2031 * each event. 2032 */ 2033 2034 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 2035 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 2036 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 2037 #define FM_SCALE 1000 /* faux fixed point scale */ 2038 2039 /* Initialize a frequency meter */ 2040 static void fmeter_init(struct fmeter *fmp) 2041 { 2042 fmp->cnt = 0; 2043 fmp->val = 0; 2044 fmp->time = 0; 2045 spin_lock_init(&fmp->lock); 2046 } 2047 2048 /* Internal meter update - process cnt events and update value */ 2049 static void fmeter_update(struct fmeter *fmp) 2050 { 2051 time64_t now; 2052 u32 ticks; 2053 2054 now = ktime_get_seconds(); 2055 ticks = now - fmp->time; 2056 2057 if (ticks == 0) 2058 return; 2059 2060 ticks = min(FM_MAXTICKS, ticks); 2061 while (ticks-- > 0) 2062 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 2063 fmp->time = now; 2064 2065 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 2066 fmp->cnt = 0; 2067 } 2068 2069 /* Process any previous ticks, then bump cnt by one (times scale). */ 2070 static void fmeter_markevent(struct fmeter *fmp) 2071 { 2072 spin_lock(&fmp->lock); 2073 fmeter_update(fmp); 2074 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 2075 spin_unlock(&fmp->lock); 2076 } 2077 2078 /* Process any previous ticks, then return current value. 
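 *
 * A rough worked example (not from the original source): with FM_COEF
 * of 933 and FM_SCALE of 1000, a steady rate of one fmeter_markevent()
 * per second settles where 933 * 1000 / 1000 + (1000 - 933) == 1000,
 * i.e. the N*1000 plateau described in the "Frequency meter" comment
 * above; once events stop, the value then halves roughly every ten
 * one-second ticks, since 0.933^10 is about 0.5.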
*/ 2079 static int fmeter_getrate(struct fmeter *fmp) 2080 { 2081 int val; 2082 2083 spin_lock(&fmp->lock); 2084 fmeter_update(fmp); 2085 val = fmp->val; 2086 spin_unlock(&fmp->lock); 2087 return val; 2088 } 2089 2090 static struct cpuset *cpuset_attach_old_cs; 2091 2092 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 2093 static int cpuset_can_attach(struct cgroup_taskset *tset) 2094 { 2095 struct cgroup_subsys_state *css; 2096 struct cpuset *cs; 2097 struct task_struct *task; 2098 int ret; 2099 2100 /* used later by cpuset_attach() */ 2101 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 2102 cs = css_cs(css); 2103 2104 mutex_lock(&cpuset_mutex); 2105 2106 /* allow moving tasks into an empty cpuset if on default hierarchy */ 2107 ret = -ENOSPC; 2108 if (!is_in_v2_mode() && 2109 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 2110 goto out_unlock; 2111 2112 cgroup_taskset_for_each(task, css, tset) { 2113 ret = task_can_attach(task, cs->cpus_allowed); 2114 if (ret) 2115 goto out_unlock; 2116 ret = security_task_setscheduler(task); 2117 if (ret) 2118 goto out_unlock; 2119 } 2120 2121 /* 2122 * Mark attach is in progress. This makes validate_change() fail 2123 * changes which zero cpus/mems_allowed. 2124 */ 2125 cs->attach_in_progress++; 2126 ret = 0; 2127 out_unlock: 2128 mutex_unlock(&cpuset_mutex); 2129 return ret; 2130 } 2131 2132 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 2133 { 2134 struct cgroup_subsys_state *css; 2135 2136 cgroup_taskset_first(tset, &css); 2137 2138 mutex_lock(&cpuset_mutex); 2139 css_cs(css)->attach_in_progress--; 2140 mutex_unlock(&cpuset_mutex); 2141 } 2142 2143 /* 2144 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() 2145 * but we can't allocate it dynamically there. Define it global and 2146 * allocate from cpuset_init(). 2147 */ 2148 static cpumask_var_t cpus_attach; 2149 2150 static void cpuset_attach(struct cgroup_taskset *tset) 2151 { 2152 /* static buf protected by cpuset_mutex */ 2153 static nodemask_t cpuset_attach_nodemask_to; 2154 struct task_struct *task; 2155 struct task_struct *leader; 2156 struct cgroup_subsys_state *css; 2157 struct cpuset *cs; 2158 struct cpuset *oldcs = cpuset_attach_old_cs; 2159 2160 cgroup_taskset_first(tset, &css); 2161 cs = css_cs(css); 2162 2163 mutex_lock(&cpuset_mutex); 2164 2165 /* prepare for attach */ 2166 if (cs == &top_cpuset) 2167 cpumask_copy(cpus_attach, cpu_possible_mask); 2168 else 2169 guarantee_online_cpus(cs, cpus_attach); 2170 2171 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 2172 2173 cgroup_taskset_for_each(task, css, tset) { 2174 /* 2175 * can_attach beforehand should guarantee that this doesn't 2176 * fail. TODO: have a better way to handle failure here 2177 */ 2178 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); 2179 2180 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); 2181 cpuset_update_task_spread_flag(cs, task); 2182 } 2183 2184 /* 2185 * Change mm for all threadgroup leaders. This is expensive and may 2186 * sleep and should be moved outside migration path proper. 2187 */ 2188 cpuset_attach_nodemask_to = cs->effective_mems; 2189 cgroup_taskset_for_each_leader(leader, css, tset) { 2190 struct mm_struct *mm = get_task_mm(leader); 2191 2192 if (mm) { 2193 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); 2194 2195 /* 2196 * old_mems_allowed is the same with mems_allowed 2197 * here, except if this task is being moved 2198 * automatically due to hotplug. 
In that case 2199 * @mems_allowed has been updated and is empty, so 2200 * @old_mems_allowed is the right nodesets that we 2201 * migrate mm from. 2202 */ 2203 if (is_memory_migrate(cs)) 2204 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 2205 &cpuset_attach_nodemask_to); 2206 else 2207 mmput(mm); 2208 } 2209 } 2210 2211 cs->old_mems_allowed = cpuset_attach_nodemask_to; 2212 2213 cs->attach_in_progress--; 2214 if (!cs->attach_in_progress) 2215 wake_up(&cpuset_attach_wq); 2216 2217 mutex_unlock(&cpuset_mutex); 2218 } 2219 2220 /* The various types of files and directories in a cpuset file system */ 2221 2222 typedef enum { 2223 FILE_MEMORY_MIGRATE, 2224 FILE_CPULIST, 2225 FILE_MEMLIST, 2226 FILE_EFFECTIVE_CPULIST, 2227 FILE_EFFECTIVE_MEMLIST, 2228 FILE_SUBPARTS_CPULIST, 2229 FILE_CPU_EXCLUSIVE, 2230 FILE_MEM_EXCLUSIVE, 2231 FILE_MEM_HARDWALL, 2232 FILE_SCHED_LOAD_BALANCE, 2233 FILE_PARTITION_ROOT, 2234 FILE_SCHED_RELAX_DOMAIN_LEVEL, 2235 FILE_MEMORY_PRESSURE_ENABLED, 2236 FILE_MEMORY_PRESSURE, 2237 FILE_SPREAD_PAGE, 2238 FILE_SPREAD_SLAB, 2239 } cpuset_filetype_t; 2240 2241 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 2242 u64 val) 2243 { 2244 struct cpuset *cs = css_cs(css); 2245 cpuset_filetype_t type = cft->private; 2246 int retval = 0; 2247 2248 mutex_lock(&cpuset_mutex); 2249 if (!is_cpuset_online(cs)) { 2250 retval = -ENODEV; 2251 goto out_unlock; 2252 } 2253 2254 switch (type) { 2255 case FILE_CPU_EXCLUSIVE: 2256 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 2257 break; 2258 case FILE_MEM_EXCLUSIVE: 2259 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 2260 break; 2261 case FILE_MEM_HARDWALL: 2262 retval = update_flag(CS_MEM_HARDWALL, cs, val); 2263 break; 2264 case FILE_SCHED_LOAD_BALANCE: 2265 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 2266 break; 2267 case FILE_MEMORY_MIGRATE: 2268 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 2269 break; 2270 case FILE_MEMORY_PRESSURE_ENABLED: 2271 cpuset_memory_pressure_enabled = !!val; 2272 break; 2273 case FILE_SPREAD_PAGE: 2274 retval = update_flag(CS_SPREAD_PAGE, cs, val); 2275 break; 2276 case FILE_SPREAD_SLAB: 2277 retval = update_flag(CS_SPREAD_SLAB, cs, val); 2278 break; 2279 default: 2280 retval = -EINVAL; 2281 break; 2282 } 2283 out_unlock: 2284 mutex_unlock(&cpuset_mutex); 2285 return retval; 2286 } 2287 2288 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 2289 s64 val) 2290 { 2291 struct cpuset *cs = css_cs(css); 2292 cpuset_filetype_t type = cft->private; 2293 int retval = -ENODEV; 2294 2295 mutex_lock(&cpuset_mutex); 2296 if (!is_cpuset_online(cs)) 2297 goto out_unlock; 2298 2299 switch (type) { 2300 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2301 retval = update_relax_domain_level(cs, val); 2302 break; 2303 default: 2304 retval = -EINVAL; 2305 break; 2306 } 2307 out_unlock: 2308 mutex_unlock(&cpuset_mutex); 2309 return retval; 2310 } 2311 2312 /* 2313 * Common handling for a write to a "cpus" or "mems" file. 2314 */ 2315 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 2316 char *buf, size_t nbytes, loff_t off) 2317 { 2318 struct cpuset *cs = css_cs(of_css(of)); 2319 struct cpuset *trialcs; 2320 int retval = -ENODEV; 2321 2322 buf = strstrip(buf); 2323 2324 /* 2325 * CPU or memory hotunplug may leave @cs w/o any execution 2326 * resources, in which case the hotplug code asynchronously updates 2327 * configuration and transfers all tasks to the nearest ancestor 2328 * which can execute. 
	 *
	 * As writes to "cpus" or "mems" may restore @cs's execution
	 * resources, wait for the previously scheduled operations before
	 * proceeding, so that we don't keep removing tasks that were added
	 * after execution capability is restored.
	 *
	 * cpuset_hotplug_work calls back into cgroup core via
	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
	 * operation like this one can lead to a deadlock through kernfs
	 * active_ref protection. Let's break the protection. Losing the
	 * protection is okay as we check whether @cs is online after
	 * grabbing cpuset_mutex anyway. This only happens on the legacy
	 * hierarchies.
	 */
	css_get(&cs->css);
	kernfs_break_active_protection(of->kn);
	flush_work(&cpuset_hotplug_work);

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))
		goto out_unlock;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	switch (of_cft(of)->private) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, trialcs, buf);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, trialcs, buf);
		break;
	default:
		retval = -EINVAL;
		break;
	}

	free_cpuset(trialcs);
out_unlock:
	mutex_unlock(&cpuset_mutex);
	kernfs_unbreak_active_protection(of->kn);
	css_put(&cs->css);
	flush_workqueue(cpuset_migrate_mm_wq);
	return retval ?: nbytes;
}

/*
 * These ASCII lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map. If read in smaller
 * chunks, there is no guarantee of atomicity. Since the display format
 * used, a list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
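 *
 * As an illustration (not from the original source), a cpuset whose
 * cpus_allowed contains CPUs 0-3 and 7 is shown by the "%*pbl" format
 * below as the range list "0-3,7".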
 */
static int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
	struct cpuset *cs = css_cs(seq_css(sf));
	cpuset_filetype_t type = seq_cft(sf)->private;
	int ret = 0;

	spin_lock_irq(&callback_lock);

	switch (type) {
	case FILE_CPULIST:
		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
		break;
	case FILE_MEMLIST:
		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
		break;
	case FILE_EFFECTIVE_CPULIST:
		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
		break;
	case FILE_EFFECTIVE_MEMLIST:
		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
		break;
	case FILE_SUBPARTS_CPULIST:
		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock_irq(&callback_lock);
	return ret;
}

static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct cpuset *cs = css_cs(css);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_CPU_EXCLUSIVE:
		return is_cpu_exclusive(cs);
	case FILE_MEM_EXCLUSIVE:
		return is_mem_exclusive(cs);
	case FILE_MEM_HARDWALL:
		return is_mem_hardwall(cs);
	case FILE_SCHED_LOAD_BALANCE:
		return is_sched_load_balance(cs);
	case FILE_MEMORY_MIGRATE:
		return is_memory_migrate(cs);
	case FILE_MEMORY_PRESSURE_ENABLED:
		return cpuset_memory_pressure_enabled;
	case FILE_MEMORY_PRESSURE:
		return fmeter_getrate(&cs->fmeter);
	case FILE_SPREAD_PAGE:
		return is_spread_page(cs);
	case FILE_SPREAD_SLAB:
		return is_spread_slab(cs);
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}

static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct cpuset *cs = css_cs(css);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		return cs->relax_domain_level;
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}

static int sched_partition_show(struct seq_file *seq, void *v)
{
	struct cpuset *cs = css_cs(seq_css(seq));

	switch (cs->partition_root_state) {
	case PRS_ENABLED:
		seq_puts(seq, "root\n");
		break;
	case PRS_DISABLED:
		seq_puts(seq, "member\n");
		break;
	case PRS_ERROR:
		seq_puts(seq, "root invalid\n");
		break;
	}
	return 0;
}

static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
				     size_t nbytes, loff_t off)
{
	struct cpuset *cs = css_cs(of_css(of));
	int val;
	int retval = -ENODEV;

	buf = strstrip(buf);

	/*
	 * Convert "root" to ENABLED, and convert "member" to DISABLED.
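	 * Any other string is rejected with -EINVAL. As an illustrative
	 * example, "echo root > cpuset.cpus.partition" from user space
	 * arrives here as val == PRS_ENABLED and is handed on to
	 * update_prstate().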
2493 */ 2494 if (!strcmp(buf, "root")) 2495 val = PRS_ENABLED; 2496 else if (!strcmp(buf, "member")) 2497 val = PRS_DISABLED; 2498 else 2499 return -EINVAL; 2500 2501 css_get(&cs->css); 2502 mutex_lock(&cpuset_mutex); 2503 if (!is_cpuset_online(cs)) 2504 goto out_unlock; 2505 2506 retval = update_prstate(cs, val); 2507 out_unlock: 2508 mutex_unlock(&cpuset_mutex); 2509 css_put(&cs->css); 2510 return retval ?: nbytes; 2511 } 2512 2513 /* 2514 * for the common functions, 'private' gives the type of file 2515 */ 2516 2517 static struct cftype legacy_files[] = { 2518 { 2519 .name = "cpus", 2520 .seq_show = cpuset_common_seq_show, 2521 .write = cpuset_write_resmask, 2522 .max_write_len = (100U + 6 * NR_CPUS), 2523 .private = FILE_CPULIST, 2524 }, 2525 2526 { 2527 .name = "mems", 2528 .seq_show = cpuset_common_seq_show, 2529 .write = cpuset_write_resmask, 2530 .max_write_len = (100U + 6 * MAX_NUMNODES), 2531 .private = FILE_MEMLIST, 2532 }, 2533 2534 { 2535 .name = "effective_cpus", 2536 .seq_show = cpuset_common_seq_show, 2537 .private = FILE_EFFECTIVE_CPULIST, 2538 }, 2539 2540 { 2541 .name = "effective_mems", 2542 .seq_show = cpuset_common_seq_show, 2543 .private = FILE_EFFECTIVE_MEMLIST, 2544 }, 2545 2546 { 2547 .name = "cpu_exclusive", 2548 .read_u64 = cpuset_read_u64, 2549 .write_u64 = cpuset_write_u64, 2550 .private = FILE_CPU_EXCLUSIVE, 2551 }, 2552 2553 { 2554 .name = "mem_exclusive", 2555 .read_u64 = cpuset_read_u64, 2556 .write_u64 = cpuset_write_u64, 2557 .private = FILE_MEM_EXCLUSIVE, 2558 }, 2559 2560 { 2561 .name = "mem_hardwall", 2562 .read_u64 = cpuset_read_u64, 2563 .write_u64 = cpuset_write_u64, 2564 .private = FILE_MEM_HARDWALL, 2565 }, 2566 2567 { 2568 .name = "sched_load_balance", 2569 .read_u64 = cpuset_read_u64, 2570 .write_u64 = cpuset_write_u64, 2571 .private = FILE_SCHED_LOAD_BALANCE, 2572 }, 2573 2574 { 2575 .name = "sched_relax_domain_level", 2576 .read_s64 = cpuset_read_s64, 2577 .write_s64 = cpuset_write_s64, 2578 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 2579 }, 2580 2581 { 2582 .name = "memory_migrate", 2583 .read_u64 = cpuset_read_u64, 2584 .write_u64 = cpuset_write_u64, 2585 .private = FILE_MEMORY_MIGRATE, 2586 }, 2587 2588 { 2589 .name = "memory_pressure", 2590 .read_u64 = cpuset_read_u64, 2591 .private = FILE_MEMORY_PRESSURE, 2592 }, 2593 2594 { 2595 .name = "memory_spread_page", 2596 .read_u64 = cpuset_read_u64, 2597 .write_u64 = cpuset_write_u64, 2598 .private = FILE_SPREAD_PAGE, 2599 }, 2600 2601 { 2602 .name = "memory_spread_slab", 2603 .read_u64 = cpuset_read_u64, 2604 .write_u64 = cpuset_write_u64, 2605 .private = FILE_SPREAD_SLAB, 2606 }, 2607 2608 { 2609 .name = "memory_pressure_enabled", 2610 .flags = CFTYPE_ONLY_ON_ROOT, 2611 .read_u64 = cpuset_read_u64, 2612 .write_u64 = cpuset_write_u64, 2613 .private = FILE_MEMORY_PRESSURE_ENABLED, 2614 }, 2615 2616 { } /* terminate */ 2617 }; 2618 2619 /* 2620 * This is currently a minimal set for the default hierarchy. It can be 2621 * expanded later on by migrating more features and control files from v1. 
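 *
 * With the "cpuset." prefix that the cgroup core prepends to these
 * names, the entries below appear on the unified hierarchy as
 * cpuset.cpus, cpuset.mems, cpuset.cpus.effective, cpuset.mems.effective,
 * cpuset.cpus.partition and, when cgroup debug files are enabled,
 * cpuset.cpus.subpartitions.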
2622 */ 2623 static struct cftype dfl_files[] = { 2624 { 2625 .name = "cpus", 2626 .seq_show = cpuset_common_seq_show, 2627 .write = cpuset_write_resmask, 2628 .max_write_len = (100U + 6 * NR_CPUS), 2629 .private = FILE_CPULIST, 2630 .flags = CFTYPE_NOT_ON_ROOT, 2631 }, 2632 2633 { 2634 .name = "mems", 2635 .seq_show = cpuset_common_seq_show, 2636 .write = cpuset_write_resmask, 2637 .max_write_len = (100U + 6 * MAX_NUMNODES), 2638 .private = FILE_MEMLIST, 2639 .flags = CFTYPE_NOT_ON_ROOT, 2640 }, 2641 2642 { 2643 .name = "cpus.effective", 2644 .seq_show = cpuset_common_seq_show, 2645 .private = FILE_EFFECTIVE_CPULIST, 2646 }, 2647 2648 { 2649 .name = "mems.effective", 2650 .seq_show = cpuset_common_seq_show, 2651 .private = FILE_EFFECTIVE_MEMLIST, 2652 }, 2653 2654 { 2655 .name = "cpus.partition", 2656 .seq_show = sched_partition_show, 2657 .write = sched_partition_write, 2658 .private = FILE_PARTITION_ROOT, 2659 .flags = CFTYPE_NOT_ON_ROOT, 2660 }, 2661 2662 { 2663 .name = "cpus.subpartitions", 2664 .seq_show = cpuset_common_seq_show, 2665 .private = FILE_SUBPARTS_CPULIST, 2666 .flags = CFTYPE_DEBUG, 2667 }, 2668 2669 { } /* terminate */ 2670 }; 2671 2672 2673 /* 2674 * cpuset_css_alloc - allocate a cpuset css 2675 * cgrp: control group that the new cpuset will be part of 2676 */ 2677 2678 static struct cgroup_subsys_state * 2679 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 2680 { 2681 struct cpuset *cs; 2682 2683 if (!parent_css) 2684 return &top_cpuset.css; 2685 2686 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 2687 if (!cs) 2688 return ERR_PTR(-ENOMEM); 2689 2690 if (alloc_cpumasks(cs, NULL)) { 2691 kfree(cs); 2692 return ERR_PTR(-ENOMEM); 2693 } 2694 2695 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 2696 nodes_clear(cs->mems_allowed); 2697 nodes_clear(cs->effective_mems); 2698 fmeter_init(&cs->fmeter); 2699 cs->relax_domain_level = -1; 2700 2701 return &cs->css; 2702 } 2703 2704 static int cpuset_css_online(struct cgroup_subsys_state *css) 2705 { 2706 struct cpuset *cs = css_cs(css); 2707 struct cpuset *parent = parent_cs(cs); 2708 struct cpuset *tmp_cs; 2709 struct cgroup_subsys_state *pos_css; 2710 2711 if (!parent) 2712 return 0; 2713 2714 mutex_lock(&cpuset_mutex); 2715 2716 set_bit(CS_ONLINE, &cs->flags); 2717 if (is_spread_page(parent)) 2718 set_bit(CS_SPREAD_PAGE, &cs->flags); 2719 if (is_spread_slab(parent)) 2720 set_bit(CS_SPREAD_SLAB, &cs->flags); 2721 2722 cpuset_inc(); 2723 2724 spin_lock_irq(&callback_lock); 2725 if (is_in_v2_mode()) { 2726 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 2727 cs->effective_mems = parent->effective_mems; 2728 cs->use_parent_ecpus = true; 2729 parent->child_ecpus_count++; 2730 } 2731 spin_unlock_irq(&callback_lock); 2732 2733 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 2734 goto out_unlock; 2735 2736 /* 2737 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 2738 * set. This flag handling is implemented in cgroup core for 2739 * histrical reasons - the flag may be specified during mount. 2740 * 2741 * Currently, if any sibling cpusets have exclusive cpus or mem, we 2742 * refuse to clone the configuration - thereby refusing the task to 2743 * be entered, and as a result refusing the sys_unshare() or 2744 * clone() which initiated it. If this becomes a problem for some 2745 * users who wish to allow that scenario, then this could be 2746 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 2747 * (and likewise for mems) to the new cgroup. 
2748 */ 2749 rcu_read_lock(); 2750 cpuset_for_each_child(tmp_cs, pos_css, parent) { 2751 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 2752 rcu_read_unlock(); 2753 goto out_unlock; 2754 } 2755 } 2756 rcu_read_unlock(); 2757 2758 spin_lock_irq(&callback_lock); 2759 cs->mems_allowed = parent->mems_allowed; 2760 cs->effective_mems = parent->mems_allowed; 2761 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 2762 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 2763 spin_unlock_irq(&callback_lock); 2764 out_unlock: 2765 mutex_unlock(&cpuset_mutex); 2766 return 0; 2767 } 2768 2769 /* 2770 * If the cpuset being removed has its flag 'sched_load_balance' 2771 * enabled, then simulate turning sched_load_balance off, which 2772 * will call rebuild_sched_domains_locked(). That is not needed 2773 * in the default hierarchy where only changes in partition 2774 * will cause repartitioning. 2775 * 2776 * If the cpuset has the 'sched.partition' flag enabled, simulate 2777 * turning 'sched.partition" off. 2778 */ 2779 2780 static void cpuset_css_offline(struct cgroup_subsys_state *css) 2781 { 2782 struct cpuset *cs = css_cs(css); 2783 2784 mutex_lock(&cpuset_mutex); 2785 2786 if (is_partition_root(cs)) 2787 update_prstate(cs, 0); 2788 2789 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 2790 is_sched_load_balance(cs)) 2791 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 2792 2793 if (cs->use_parent_ecpus) { 2794 struct cpuset *parent = parent_cs(cs); 2795 2796 cs->use_parent_ecpus = false; 2797 parent->child_ecpus_count--; 2798 } 2799 2800 cpuset_dec(); 2801 clear_bit(CS_ONLINE, &cs->flags); 2802 2803 mutex_unlock(&cpuset_mutex); 2804 } 2805 2806 static void cpuset_css_free(struct cgroup_subsys_state *css) 2807 { 2808 struct cpuset *cs = css_cs(css); 2809 2810 free_cpuset(cs); 2811 } 2812 2813 static void cpuset_bind(struct cgroup_subsys_state *root_css) 2814 { 2815 mutex_lock(&cpuset_mutex); 2816 spin_lock_irq(&callback_lock); 2817 2818 if (is_in_v2_mode()) { 2819 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 2820 top_cpuset.mems_allowed = node_possible_map; 2821 } else { 2822 cpumask_copy(top_cpuset.cpus_allowed, 2823 top_cpuset.effective_cpus); 2824 top_cpuset.mems_allowed = top_cpuset.effective_mems; 2825 } 2826 2827 spin_unlock_irq(&callback_lock); 2828 mutex_unlock(&cpuset_mutex); 2829 } 2830 2831 /* 2832 * Make sure the new task conform to the current state of its parent, 2833 * which could have been changed by cpuset just after it inherits the 2834 * state from the parent and before it sits on the cgroup's task list. 
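 *
 * Tasks forked into the root cpuset are left alone; any other new task
 * simply inherits the forking task's current cpumask and mems_allowed,
 * so a cpuset change that raced with the fork is not lost.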
2835 */ 2836 static void cpuset_fork(struct task_struct *task) 2837 { 2838 if (task_css_is_root(task, cpuset_cgrp_id)) 2839 return; 2840 2841 set_cpus_allowed_ptr(task, current->cpus_ptr); 2842 task->mems_allowed = current->mems_allowed; 2843 } 2844 2845 struct cgroup_subsys cpuset_cgrp_subsys = { 2846 .css_alloc = cpuset_css_alloc, 2847 .css_online = cpuset_css_online, 2848 .css_offline = cpuset_css_offline, 2849 .css_free = cpuset_css_free, 2850 .can_attach = cpuset_can_attach, 2851 .cancel_attach = cpuset_cancel_attach, 2852 .attach = cpuset_attach, 2853 .post_attach = cpuset_post_attach, 2854 .bind = cpuset_bind, 2855 .fork = cpuset_fork, 2856 .legacy_cftypes = legacy_files, 2857 .dfl_cftypes = dfl_files, 2858 .early_init = true, 2859 .threaded = true, 2860 }; 2861 2862 /** 2863 * cpuset_init - initialize cpusets at system boot 2864 * 2865 * Description: Initialize top_cpuset 2866 **/ 2867 2868 int __init cpuset_init(void) 2869 { 2870 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 2871 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 2872 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); 2873 2874 cpumask_setall(top_cpuset.cpus_allowed); 2875 nodes_setall(top_cpuset.mems_allowed); 2876 cpumask_setall(top_cpuset.effective_cpus); 2877 nodes_setall(top_cpuset.effective_mems); 2878 2879 fmeter_init(&top_cpuset.fmeter); 2880 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); 2881 top_cpuset.relax_domain_level = -1; 2882 2883 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); 2884 2885 return 0; 2886 } 2887 2888 /* 2889 * If CPU and/or memory hotplug handlers, below, unplug any CPUs 2890 * or memory nodes, we need to walk over the cpuset hierarchy, 2891 * removing that CPU or node from all cpusets. If this removes the 2892 * last CPU or node from a cpuset, then move the tasks in the empty 2893 * cpuset to its next-highest non-empty parent. 2894 */ 2895 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 2896 { 2897 struct cpuset *parent; 2898 2899 /* 2900 * Find its next-highest non-empty parent, (top cpuset 2901 * has online cpus, so can't be empty). 2902 */ 2903 parent = parent_cs(cs); 2904 while (cpumask_empty(parent->cpus_allowed) || 2905 nodes_empty(parent->mems_allowed)) 2906 parent = parent_cs(parent); 2907 2908 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 2909 pr_err("cpuset: failed to transfer tasks out of empty cpuset "); 2910 pr_cont_cgroup_name(cs->css.cgroup); 2911 pr_cont("\n"); 2912 } 2913 } 2914 2915 static void 2916 hotplug_update_tasks_legacy(struct cpuset *cs, 2917 struct cpumask *new_cpus, nodemask_t *new_mems, 2918 bool cpus_updated, bool mems_updated) 2919 { 2920 bool is_empty; 2921 2922 spin_lock_irq(&callback_lock); 2923 cpumask_copy(cs->cpus_allowed, new_cpus); 2924 cpumask_copy(cs->effective_cpus, new_cpus); 2925 cs->mems_allowed = *new_mems; 2926 cs->effective_mems = *new_mems; 2927 spin_unlock_irq(&callback_lock); 2928 2929 /* 2930 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 2931 * as the tasks will be migratecd to an ancestor. 
2932 */ 2933 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) 2934 update_tasks_cpumask(cs); 2935 if (mems_updated && !nodes_empty(cs->mems_allowed)) 2936 update_tasks_nodemask(cs); 2937 2938 is_empty = cpumask_empty(cs->cpus_allowed) || 2939 nodes_empty(cs->mems_allowed); 2940 2941 mutex_unlock(&cpuset_mutex); 2942 2943 /* 2944 * Move tasks to the nearest ancestor with execution resources, 2945 * This is full cgroup operation which will also call back into 2946 * cpuset. Should be done outside any lock. 2947 */ 2948 if (is_empty) 2949 remove_tasks_in_empty_cpuset(cs); 2950 2951 mutex_lock(&cpuset_mutex); 2952 } 2953 2954 static void 2955 hotplug_update_tasks(struct cpuset *cs, 2956 struct cpumask *new_cpus, nodemask_t *new_mems, 2957 bool cpus_updated, bool mems_updated) 2958 { 2959 if (cpumask_empty(new_cpus)) 2960 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 2961 if (nodes_empty(*new_mems)) 2962 *new_mems = parent_cs(cs)->effective_mems; 2963 2964 spin_lock_irq(&callback_lock); 2965 cpumask_copy(cs->effective_cpus, new_cpus); 2966 cs->effective_mems = *new_mems; 2967 spin_unlock_irq(&callback_lock); 2968 2969 if (cpus_updated) 2970 update_tasks_cpumask(cs); 2971 if (mems_updated) 2972 update_tasks_nodemask(cs); 2973 } 2974 2975 static bool force_rebuild; 2976 2977 void cpuset_force_rebuild(void) 2978 { 2979 force_rebuild = true; 2980 } 2981 2982 /** 2983 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 2984 * @cs: cpuset in interest 2985 * @tmp: the tmpmasks structure pointer 2986 * 2987 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 2988 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 2989 * all its tasks are moved to the nearest ancestor with both resources. 2990 */ 2991 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 2992 { 2993 static cpumask_t new_cpus; 2994 static nodemask_t new_mems; 2995 bool cpus_updated; 2996 bool mems_updated; 2997 struct cpuset *parent; 2998 retry: 2999 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 3000 3001 mutex_lock(&cpuset_mutex); 3002 3003 /* 3004 * We have raced with task attaching. We wait until attaching 3005 * is finished, so we won't attach a task to an empty cpuset. 3006 */ 3007 if (cs->attach_in_progress) { 3008 mutex_unlock(&cpuset_mutex); 3009 goto retry; 3010 } 3011 3012 parent = parent_cs(cs); 3013 compute_effective_cpumask(&new_cpus, cs, parent); 3014 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 3015 3016 if (cs->nr_subparts_cpus) 3017 /* 3018 * Make sure that CPUs allocated to child partitions 3019 * do not show up in effective_cpus. 3020 */ 3021 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); 3022 3023 if (!tmp || !cs->partition_root_state) 3024 goto update_tasks; 3025 3026 /* 3027 * In the unlikely event that a partition root has empty 3028 * effective_cpus or its parent becomes erroneous, we have to 3029 * transition it to the erroneous state. 3030 */ 3031 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || 3032 (parent->partition_root_state == PRS_ERROR))) { 3033 if (cs->nr_subparts_cpus) { 3034 cs->nr_subparts_cpus = 0; 3035 cpumask_clear(cs->subparts_cpus); 3036 compute_effective_cpumask(&new_cpus, cs, parent); 3037 } 3038 3039 /* 3040 * If the effective_cpus is empty because the child 3041 * partitions take away all the CPUs, we can keep 3042 * the current partition and let the child partitions 3043 * fight for available CPUs. 
3044 */ 3045 if ((parent->partition_root_state == PRS_ERROR) || 3046 cpumask_empty(&new_cpus)) { 3047 update_parent_subparts_cpumask(cs, partcmd_disable, 3048 NULL, tmp); 3049 cs->partition_root_state = PRS_ERROR; 3050 } 3051 cpuset_force_rebuild(); 3052 } 3053 3054 /* 3055 * On the other hand, an erroneous partition root may be transitioned 3056 * back to a regular one or a partition root with no CPU allocated 3057 * from the parent may change to erroneous. 3058 */ 3059 if (is_partition_root(parent) && 3060 ((cs->partition_root_state == PRS_ERROR) || 3061 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && 3062 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) 3063 cpuset_force_rebuild(); 3064 3065 update_tasks: 3066 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); 3067 mems_updated = !nodes_equal(new_mems, cs->effective_mems); 3068 3069 if (is_in_v2_mode()) 3070 hotplug_update_tasks(cs, &new_cpus, &new_mems, 3071 cpus_updated, mems_updated); 3072 else 3073 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, 3074 cpus_updated, mems_updated); 3075 3076 mutex_unlock(&cpuset_mutex); 3077 } 3078 3079 /** 3080 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset 3081 * 3082 * This function is called after either CPU or memory configuration has 3083 * changed and updates cpuset accordingly. The top_cpuset is always 3084 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in 3085 * order to make cpusets transparent (of no affect) on systems that are 3086 * actively using CPU hotplug but making no active use of cpusets. 3087 * 3088 * Non-root cpusets are only affected by offlining. If any CPUs or memory 3089 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on 3090 * all descendants. 3091 * 3092 * Note that CPU offlining during suspend is ignored. We don't modify 3093 * cpusets across suspend/resume cycles at all. 3094 */ 3095 static void cpuset_hotplug_workfn(struct work_struct *work) 3096 { 3097 static cpumask_t new_cpus; 3098 static nodemask_t new_mems; 3099 bool cpus_updated, mems_updated; 3100 bool on_dfl = is_in_v2_mode(); 3101 struct tmpmasks tmp, *ptmp = NULL; 3102 3103 if (on_dfl && !alloc_cpumasks(NULL, &tmp)) 3104 ptmp = &tmp; 3105 3106 mutex_lock(&cpuset_mutex); 3107 3108 /* fetch the available cpus/mems and find out which changed how */ 3109 cpumask_copy(&new_cpus, cpu_active_mask); 3110 new_mems = node_states[N_MEMORY]; 3111 3112 /* 3113 * If subparts_cpus is populated, it is likely that the check below 3114 * will produce a false positive on cpus_updated when the cpu list 3115 * isn't changed. It is extra work, but it is better to be safe. 3116 */ 3117 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); 3118 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 3119 3120 /* synchronize cpus_allowed to cpu_active_mask */ 3121 if (cpus_updated) { 3122 spin_lock_irq(&callback_lock); 3123 if (!on_dfl) 3124 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 3125 /* 3126 * Make sure that CPUs allocated to child partitions 3127 * do not show up in effective_cpus. If no CPU is left, 3128 * we clear the subparts_cpus & let the child partitions 3129 * fight for the CPUs again. 
3130 */ 3131 if (top_cpuset.nr_subparts_cpus) { 3132 if (cpumask_subset(&new_cpus, 3133 top_cpuset.subparts_cpus)) { 3134 top_cpuset.nr_subparts_cpus = 0; 3135 cpumask_clear(top_cpuset.subparts_cpus); 3136 } else { 3137 cpumask_andnot(&new_cpus, &new_cpus, 3138 top_cpuset.subparts_cpus); 3139 } 3140 } 3141 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 3142 spin_unlock_irq(&callback_lock); 3143 /* we don't mess with cpumasks of tasks in top_cpuset */ 3144 } 3145 3146 /* synchronize mems_allowed to N_MEMORY */ 3147 if (mems_updated) { 3148 spin_lock_irq(&callback_lock); 3149 if (!on_dfl) 3150 top_cpuset.mems_allowed = new_mems; 3151 top_cpuset.effective_mems = new_mems; 3152 spin_unlock_irq(&callback_lock); 3153 update_tasks_nodemask(&top_cpuset); 3154 } 3155 3156 mutex_unlock(&cpuset_mutex); 3157 3158 /* if cpus or mems changed, we need to propagate to descendants */ 3159 if (cpus_updated || mems_updated) { 3160 struct cpuset *cs; 3161 struct cgroup_subsys_state *pos_css; 3162 3163 rcu_read_lock(); 3164 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 3165 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) 3166 continue; 3167 rcu_read_unlock(); 3168 3169 cpuset_hotplug_update_tasks(cs, ptmp); 3170 3171 rcu_read_lock(); 3172 css_put(&cs->css); 3173 } 3174 rcu_read_unlock(); 3175 } 3176 3177 /* rebuild sched domains if cpus_allowed has changed */ 3178 if (cpus_updated || force_rebuild) { 3179 force_rebuild = false; 3180 rebuild_sched_domains(); 3181 } 3182 3183 free_cpumasks(NULL, ptmp); 3184 } 3185 3186 void cpuset_update_active_cpus(void) 3187 { 3188 /* 3189 * We're inside cpu hotplug critical region which usually nests 3190 * inside cgroup synchronization. Bounce actual hotplug processing 3191 * to a work item to avoid reverse locking order. 3192 */ 3193 schedule_work(&cpuset_hotplug_work); 3194 } 3195 3196 void cpuset_wait_for_hotplug(void) 3197 { 3198 flush_work(&cpuset_hotplug_work); 3199 } 3200 3201 /* 3202 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. 3203 * Call this routine anytime after node_states[N_MEMORY] changes. 3204 * See cpuset_update_active_cpus() for CPU hotplug handling. 3205 */ 3206 static int cpuset_track_online_nodes(struct notifier_block *self, 3207 unsigned long action, void *arg) 3208 { 3209 schedule_work(&cpuset_hotplug_work); 3210 return NOTIFY_OK; 3211 } 3212 3213 static struct notifier_block cpuset_track_online_nodes_nb = { 3214 .notifier_call = cpuset_track_online_nodes, 3215 .priority = 10, /* ??! */ 3216 }; 3217 3218 /** 3219 * cpuset_init_smp - initialize cpus_allowed 3220 * 3221 * Description: Finish top cpuset after cpu, node maps are initialized 3222 */ 3223 void __init cpuset_init_smp(void) 3224 { 3225 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); 3226 top_cpuset.mems_allowed = node_states[N_MEMORY]; 3227 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; 3228 3229 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); 3230 top_cpuset.effective_mems = node_states[N_MEMORY]; 3231 3232 register_hotmemory_notifier(&cpuset_track_online_nodes_nb); 3233 3234 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); 3235 BUG_ON(!cpuset_migrate_mm_wq); 3236 } 3237 3238 /** 3239 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 3240 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 3241 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * tasks cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_cpus(task_cs(tsk), pmask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);
}

/**
 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
 * @tsk: pointer to task_struct with which the scheduler is struggling
 *
 * Description: In the case that the scheduler cannot find an allowed cpu in
 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
 * which will not contain a sane cpumask during cases such as cpu hotplugging.
 * This is the absolute last resort for the scheduler and it is only used if
 * _every_ other avenue has been traveled.
 **/

void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
}

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * tasks cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
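 * For example (illustrative), a @nodemask of 0-3 checked against a
 * mems_allowed of just node 2 intersects and therefore counts as
 * allowed.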
3332 */ 3333 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 3334 { 3335 return nodes_intersects(*nodemask, current->mems_allowed); 3336 } 3337 3338 /* 3339 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 3340 * mem_hardwall ancestor to the specified cpuset. Call holding 3341 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall 3342 * (an unusual configuration), then returns the root cpuset. 3343 */ 3344 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 3345 { 3346 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) 3347 cs = parent_cs(cs); 3348 return cs; 3349 } 3350 3351 /** 3352 * cpuset_node_allowed - Can we allocate on a memory node? 3353 * @node: is this an allowed node? 3354 * @gfp_mask: memory allocation flags 3355 * 3356 * If we're in interrupt, yes, we can always allocate. If @node is set in 3357 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this 3358 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, 3359 * yes. If current has access to memory reserves as an oom victim, yes. 3360 * Otherwise, no. 3361 * 3362 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 3363 * and do not allow allocations outside the current tasks cpuset 3364 * unless the task has been OOM killed. 3365 * GFP_KERNEL allocations are not so marked, so can escape to the 3366 * nearest enclosing hardwalled ancestor cpuset. 3367 * 3368 * Scanning up parent cpusets requires callback_lock. The 3369 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit 3370 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 3371 * current tasks mems_allowed came up empty on the first pass over 3372 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 3373 * cpuset are short of memory, might require taking the callback_lock. 3374 * 3375 * The first call here from mm/page_alloc:get_page_from_freelist() 3376 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 3377 * so no allocation on a node outside the cpuset is allowed (unless 3378 * in interrupt, of course). 3379 * 3380 * The second pass through get_page_from_freelist() doesn't even call 3381 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 3382 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 3383 * in alloc_flags. That logic and the checks below have the combined 3384 * affect that: 3385 * in_interrupt - any node ok (current task context irrelevant) 3386 * GFP_ATOMIC - any node ok 3387 * tsk_is_oom_victim - any node ok 3388 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 3389 * GFP_USER - only nodes in current tasks mems allowed ok. 3390 */ 3391 bool __cpuset_node_allowed(int node, gfp_t gfp_mask) 3392 { 3393 struct cpuset *cs; /* current cpuset ancestors */ 3394 int allowed; /* is allocation in zone z allowed? */ 3395 unsigned long flags; 3396 3397 if (in_interrupt()) 3398 return true; 3399 if (node_isset(node, current->mems_allowed)) 3400 return true; 3401 /* 3402 * Allow tasks that have access to memory reserves because they have 3403 * been OOM killed to get memory anywhere. 
3404 */ 3405 if (unlikely(tsk_is_oom_victim(current))) 3406 return true; 3407 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 3408 return false; 3409 3410 if (current->flags & PF_EXITING) /* Let dying task have memory */ 3411 return true; 3412 3413 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 3414 spin_lock_irqsave(&callback_lock, flags); 3415 3416 rcu_read_lock(); 3417 cs = nearest_hardwall_ancestor(task_cs(current)); 3418 allowed = node_isset(node, cs->mems_allowed); 3419 rcu_read_unlock(); 3420 3421 spin_unlock_irqrestore(&callback_lock, flags); 3422 return allowed; 3423 } 3424 3425 /** 3426 * cpuset_mem_spread_node() - On which node to begin search for a file page 3427 * cpuset_slab_spread_node() - On which node to begin search for a slab page 3428 * 3429 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 3430 * tasks in a cpuset with is_spread_page or is_spread_slab set), 3431 * and if the memory allocation used cpuset_mem_spread_node() 3432 * to determine on which node to start looking, as it will for 3433 * certain page cache or slab cache pages such as used for file 3434 * system buffers and inode caches, then instead of starting on the 3435 * local node to look for a free page, rather spread the starting 3436 * node around the tasks mems_allowed nodes. 3437 * 3438 * We don't have to worry about the returned node being offline 3439 * because "it can't happen", and even if it did, it would be ok. 3440 * 3441 * The routines calling guarantee_online_mems() are careful to 3442 * only set nodes in task->mems_allowed that are online. So it 3443 * should not be possible for the following code to return an 3444 * offline node. But if it did, that would be ok, as this routine 3445 * is not returning the node where the allocation must be, only 3446 * the node where the search should start. The zonelist passed to 3447 * __alloc_pages() will include all nodes. If the slab allocator 3448 * is passed an offline node, it will fall back to the local node. 3449 * See kmem_cache_alloc_node(). 3450 */ 3451 3452 static int cpuset_spread_node(int *rotor) 3453 { 3454 return *rotor = next_node_in(*rotor, current->mems_allowed); 3455 } 3456 3457 int cpuset_mem_spread_node(void) 3458 { 3459 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) 3460 current->cpuset_mem_spread_rotor = 3461 node_random(¤t->mems_allowed); 3462 3463 return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); 3464 } 3465 3466 int cpuset_slab_spread_node(void) 3467 { 3468 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) 3469 current->cpuset_slab_spread_rotor = 3470 node_random(¤t->mems_allowed); 3471 3472 return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); 3473 } 3474 3475 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 3476 3477 /** 3478 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? 3479 * @tsk1: pointer to task_struct of some task. 3480 * @tsk2: pointer to task_struct of some other task. 3481 * 3482 * Description: Return true if @tsk1's mems_allowed intersects the 3483 * mems_allowed of @tsk2. Used by the OOM killer to determine if 3484 * one of the task's memory usage might impact the memory available 3485 * to the other. 
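 *
 * For instance (illustrative), two tasks confined by their cpusets to
 * disjoint memory nodes, say 0-1 and 2-3, do not intersect, so killing
 * one would not free memory usable by the other.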
3486 **/ 3487 3488 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 3489 const struct task_struct *tsk2) 3490 { 3491 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); 3492 } 3493 3494 /** 3495 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed 3496 * 3497 * Description: Prints current's name, cpuset name, and cached copy of its 3498 * mems_allowed to the kernel log. 3499 */ 3500 void cpuset_print_current_mems_allowed(void) 3501 { 3502 struct cgroup *cgrp; 3503 3504 rcu_read_lock(); 3505 3506 cgrp = task_cs(current)->css.cgroup; 3507 pr_cont(",cpuset="); 3508 pr_cont_cgroup_name(cgrp); 3509 pr_cont(",mems_allowed=%*pbl", 3510 nodemask_pr_args(¤t->mems_allowed)); 3511 3512 rcu_read_unlock(); 3513 } 3514 3515 /* 3516 * Collection of memory_pressure is suppressed unless 3517 * this flag is enabled by writing "1" to the special 3518 * cpuset file 'memory_pressure_enabled' in the root cpuset. 3519 */ 3520 3521 int cpuset_memory_pressure_enabled __read_mostly; 3522 3523 /** 3524 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 3525 * 3526 * Keep a running average of the rate of synchronous (direct) 3527 * page reclaim efforts initiated by tasks in each cpuset. 3528 * 3529 * This represents the rate at which some task in the cpuset 3530 * ran low on memory on all nodes it was allowed to use, and 3531 * had to enter the kernels page reclaim code in an effort to 3532 * create more free memory by tossing clean pages or swapping 3533 * or writing dirty pages. 3534 * 3535 * Display to user space in the per-cpuset read-only file 3536 * "memory_pressure". Value displayed is an integer 3537 * representing the recent rate of entry into the synchronous 3538 * (direct) page reclaim by any task attached to the cpuset. 3539 **/ 3540 3541 void __cpuset_memory_pressure_bump(void) 3542 { 3543 rcu_read_lock(); 3544 fmeter_markevent(&task_cs(current)->fmeter); 3545 rcu_read_unlock(); 3546 } 3547 3548 #ifdef CONFIG_PROC_PID_CPUSET 3549 /* 3550 * proc_cpuset_show() 3551 * - Print tasks cpuset path into seq_file. 3552 * - Used for /proc/<pid>/cpuset. 3553 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 3554 * doesn't really matter if tsk->cpuset changes after we read it, 3555 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 3556 * anyway. 3557 */ 3558 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 3559 struct pid *pid, struct task_struct *tsk) 3560 { 3561 char *buf; 3562 struct cgroup_subsys_state *css; 3563 int retval; 3564 3565 retval = -ENOMEM; 3566 buf = kmalloc(PATH_MAX, GFP_KERNEL); 3567 if (!buf) 3568 goto out; 3569 3570 css = task_get_css(tsk, cpuset_cgrp_id); 3571 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, 3572 current->nsproxy->cgroup_ns); 3573 css_put(css); 3574 if (retval >= PATH_MAX) 3575 retval = -ENAMETOOLONG; 3576 if (retval < 0) 3577 goto out_free; 3578 seq_puts(m, buf); 3579 seq_putc(m, '\n'); 3580 retval = 0; 3581 out_free: 3582 kfree(buf); 3583 out: 3584 return retval; 3585 } 3586 #endif /* CONFIG_PROC_PID_CPUSET */ 3587 3588 /* Display task mems_allowed in /proc/<pid>/status file. */ 3589 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 3590 { 3591 seq_printf(m, "Mems_allowed:\t%*pb\n", 3592 nodemask_pr_args(&task->mems_allowed)); 3593 seq_printf(m, "Mems_allowed_list:\t%*pbl\n", 3594 nodemask_pr_args(&task->mems_allowed)); 3595 } 3596
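
/*
 * Illustrative example (not from the original source): for a task
 * allowed to use memory nodes 0 and 1, the two lines above would read
 * something like the following (the exact hex grouping depends on
 * MAX_NUMNODES):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */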