/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
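
	/*
	 * Worked example (illustrative, not from the original source):
	 * if the parent's effective_cpus is 0-3 and this cpuset is
	 * configured with cpus 2-5, its effective_cpus becomes 2-3.
	 * Had the configured mask been 4-5, the intersection would be
	 * empty and effective_cpus would fall back to the parent's 0-3.
	 */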

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * The old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus. In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};
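
/*
 * Illustrative usage of the temporary masks (a sketch mirroring
 * update_prstate() further below, not additional functionality):
 *
 *	struct tmpmasks tmp;
 *
 *	if (alloc_cpumasks(NULL, &tmp))
 *		return -ENOMEM;
 *	...
 *	free_cpumasks(NULL, &tmp);
 *
 * alloc_cpumasks(NULL, &tmp) fills in new_cpus, addmask and delmask;
 * free_cpumasks(NULL, &tmp) releases the same three masks.
 */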

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
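
/*
 * Illustrative walk (a sketch only; update_domain_attr_tree() below is a
 * real user).  Skipping a whole subtree is done by advancing @pos_css to
 * its rightmost descendant:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 *		if (cpumask_empty(cp->cpus_allowed))
 *			pos_css = css_rightmost_descendant(pos_css);
 *	}
 *	rcu_read_unlock();
 */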

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used when on default hierarchy or the
 * cgroup_v2_mode flag is set.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
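
/*
 * Illustrative equivalence (not part of the original source): with the
 * compatibility hook above,
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves like
 *
 *	mount -t cgroup -o cpuset,noprefix,\
 *	      release_agent=/sbin/cpuset_release_agent none /dev/cpuset
 */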

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs:  the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs:  the cpuset that has cpumasks to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 * follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroup-v1/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't have the same 'pn' partition number,
 *	and assigns them the same partition number.  It keeps looping
 *	on the 'restart' label until it can no longer find any such
 *	pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_root(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
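
/*
 * Worked example for the partition-finding pass above (illustrative, not
 * from the original source): with three load-balanced cpusets A (cpus 0-1),
 * B (cpus 1-2) and C (cpus 4-5), A and B overlap and are merged into the
 * same 'pn', while C keeps its own.  The result is ndoms == 2 with
 * doms[] = { 0-2, 4-5 }.
 */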

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyways, hotplug work item will rebuild sched domains.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		goto out;

	if (top_cpuset.nr_subparts_cpus &&
	    !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
		goto out;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
out:
	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}

/**
 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
 * @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs to recompute the new effective_cpus mask
 * @parent: the parent cpuset
 *
 * If the parent has subpartition CPUs, include them in the list of
 * allowable CPUs in computing the new effective_cpus mask. Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root. The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus. The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus. 0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask. Otherwise,
 * cpus_allowed is assumed to remain the same. The cpuset should either
 * be a partition root or an invalid partition root. The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent. The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * Error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate(). The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change(). The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
 */
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cpuset);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	bool part_error = false;	/* Partition error? */

	lockdep_assert_held(&cpuset_mutex);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_root(parent) ||
	   (newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
		return -EINVAL;

	/*
	 * Enabling/disabling partition root is not allowed if there are
	 * online children.
	 */
	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
		return -EBUSY;

	/*
	 * Enabling partition root is not allowed if not all the CPUs
	 * can be granted from parent's effective_cpus or at least one
	 * CPU will be left after that.
	 */
	if ((cmd == partcmd_enable) &&
	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
		return -EINVAL;

	/*
	 * A cpumask update cannot make parent's effective_cpus become empty.
	 */
	adding = deleting = false;
	if (cmd == partcmd_enable) {
		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->effective_cpus
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Return error if the new effective_cpus could become empty.
		 */
		if (adding &&
		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
			if (!deleting)
				return -EINVAL;
			/*
			 * As some of the CPUs in subparts_cpus might have
			 * been offlined, we need to compute the real delmask
			 * to confirm that.
			 */
			if (!cpumask_and(tmp->addmask, tmp->delmask,
					 cpu_active_mask))
				return -EINVAL;
			cpumask_copy(tmp->addmask, parent->effective_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * addmask = cpus_allowed & parent->effective_cpus
		 *
		 * Note that parent's subparts_cpus may have been
		 * pre-shrunk in case there is a change in the cpu list.
		 * So no deletion is needed.
		 */
		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
				     parent->effective_cpus);
		part_error = cpumask_equal(tmp->addmask,
					   parent->effective_cpus);
	}

	if (cmd == partcmd_update) {
		int prev_prs = cpuset->partition_root_state;

		/*
		 * Check for possible transition between PRS_ENABLED
		 * and PRS_ERROR.
		 */
		switch (cpuset->partition_root_state) {
		case PRS_ENABLED:
			if (part_error)
				cpuset->partition_root_state = PRS_ERROR;
			break;
		case PRS_ERROR:
			if (!part_error)
				cpuset->partition_root_state = PRS_ENABLED;
			break;
		}
		/*
		 * Set part_error if previously in invalid state.
		 */
		part_error = (prev_prs == PRS_ERROR);
	}

	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
		return 0;	/* Nothing need to be done */

	if (cpuset->partition_root_state == PRS_ERROR) {
		/*
		 * Remove all its cpus from parent's subparts_cpus.
1209 */ 1210 adding = false; 1211 deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed, 1212 parent->subparts_cpus); 1213 } 1214 1215 if (!adding && !deleting) 1216 return 0; 1217 1218 /* 1219 * Change the parent's subparts_cpus. 1220 * Newly added CPUs will be removed from effective_cpus and 1221 * newly deleted ones will be added back to effective_cpus. 1222 */ 1223 spin_lock_irq(&callback_lock); 1224 if (adding) { 1225 cpumask_or(parent->subparts_cpus, 1226 parent->subparts_cpus, tmp->addmask); 1227 cpumask_andnot(parent->effective_cpus, 1228 parent->effective_cpus, tmp->addmask); 1229 } 1230 if (deleting) { 1231 cpumask_andnot(parent->subparts_cpus, 1232 parent->subparts_cpus, tmp->delmask); 1233 /* 1234 * Some of the CPUs in subparts_cpus might have been offlined. 1235 */ 1236 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); 1237 cpumask_or(parent->effective_cpus, 1238 parent->effective_cpus, tmp->delmask); 1239 } 1240 1241 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); 1242 spin_unlock_irq(&callback_lock); 1243 1244 return cmd == partcmd_update; 1245 } 1246 1247 /* 1248 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree 1249 * @cs: the cpuset to consider 1250 * @tmp: temp variables for calculating effective_cpus & partition setup 1251 * 1252 * When congifured cpumask is changed, the effective cpumasks of this cpuset 1253 * and all its descendants need to be updated. 1254 * 1255 * On legacy hierachy, effective_cpus will be the same with cpu_allowed. 1256 * 1257 * Called with cpuset_mutex held 1258 */ 1259 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) 1260 { 1261 struct cpuset *cp; 1262 struct cgroup_subsys_state *pos_css; 1263 bool need_rebuild_sched_domains = false; 1264 1265 rcu_read_lock(); 1266 cpuset_for_each_descendant_pre(cp, pos_css, cs) { 1267 struct cpuset *parent = parent_cs(cp); 1268 1269 compute_effective_cpumask(tmp->new_cpus, cp, parent); 1270 1271 /* 1272 * If it becomes empty, inherit the effective mask of the 1273 * parent, which is guaranteed to have some CPUs. 1274 */ 1275 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { 1276 cpumask_copy(tmp->new_cpus, parent->effective_cpus); 1277 if (!cp->use_parent_ecpus) { 1278 cp->use_parent_ecpus = true; 1279 parent->child_ecpus_count++; 1280 } 1281 } else if (cp->use_parent_ecpus) { 1282 cp->use_parent_ecpus = false; 1283 WARN_ON_ONCE(!parent->child_ecpus_count); 1284 parent->child_ecpus_count--; 1285 } 1286 1287 /* 1288 * Skip the whole subtree if the cpumask remains the same 1289 * and has no partition root state. 1290 */ 1291 if (!cp->partition_root_state && 1292 cpumask_equal(tmp->new_cpus, cp->effective_cpus)) { 1293 pos_css = css_rightmost_descendant(pos_css); 1294 continue; 1295 } 1296 1297 /* 1298 * update_parent_subparts_cpumask() should have been called 1299 * for cs already in update_cpumask(). We should also call 1300 * update_tasks_cpumask() again for tasks in the parent 1301 * cpuset if the parent's subparts_cpus changes. 1302 */ 1303 if ((cp != cs) && cp->partition_root_state) { 1304 switch (parent->partition_root_state) { 1305 case PRS_DISABLED: 1306 /* 1307 * If parent is not a partition root or an 1308 * invalid partition root, clear the state 1309 * state and the CS_CPU_EXCLUSIVE flag. 
1310 */ 1311 WARN_ON_ONCE(cp->partition_root_state 1312 != PRS_ERROR); 1313 cp->partition_root_state = 0; 1314 1315 /* 1316 * clear_bit() is an atomic operation and 1317 * readers aren't interested in the state 1318 * of CS_CPU_EXCLUSIVE anyway. So we can 1319 * just update the flag without holding 1320 * the callback_lock. 1321 */ 1322 clear_bit(CS_CPU_EXCLUSIVE, &cp->flags); 1323 break; 1324 1325 case PRS_ENABLED: 1326 if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp)) 1327 update_tasks_cpumask(parent); 1328 break; 1329 1330 case PRS_ERROR: 1331 /* 1332 * When parent is invalid, it has to be too. 1333 */ 1334 cp->partition_root_state = PRS_ERROR; 1335 if (cp->nr_subparts_cpus) { 1336 cp->nr_subparts_cpus = 0; 1337 cpumask_clear(cp->subparts_cpus); 1338 } 1339 break; 1340 } 1341 } 1342 1343 if (!css_tryget_online(&cp->css)) 1344 continue; 1345 rcu_read_unlock(); 1346 1347 spin_lock_irq(&callback_lock); 1348 1349 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 1350 if (cp->nr_subparts_cpus && 1351 (cp->partition_root_state != PRS_ENABLED)) { 1352 cp->nr_subparts_cpus = 0; 1353 cpumask_clear(cp->subparts_cpus); 1354 } else if (cp->nr_subparts_cpus) { 1355 /* 1356 * Make sure that effective_cpus & subparts_cpus 1357 * are mutually exclusive. 1358 * 1359 * In the unlikely event that effective_cpus 1360 * becomes empty. we clear cp->nr_subparts_cpus and 1361 * let its child partition roots to compete for 1362 * CPUs again. 1363 */ 1364 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, 1365 cp->subparts_cpus); 1366 if (cpumask_empty(cp->effective_cpus)) { 1367 cpumask_copy(cp->effective_cpus, tmp->new_cpus); 1368 cpumask_clear(cp->subparts_cpus); 1369 cp->nr_subparts_cpus = 0; 1370 } else if (!cpumask_subset(cp->subparts_cpus, 1371 tmp->new_cpus)) { 1372 cpumask_andnot(cp->subparts_cpus, 1373 cp->subparts_cpus, tmp->new_cpus); 1374 cp->nr_subparts_cpus 1375 = cpumask_weight(cp->subparts_cpus); 1376 } 1377 } 1378 spin_unlock_irq(&callback_lock); 1379 1380 WARN_ON(!is_in_v2_mode() && 1381 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); 1382 1383 update_tasks_cpumask(cp); 1384 1385 /* 1386 * On legacy hierarchy, if the effective cpumask of any non- 1387 * empty cpuset is changed, we need to rebuild sched domains. 1388 * On default hierarchy, the cpuset needs to be a partition 1389 * root as well. 1390 */ 1391 if (!cpumask_empty(cp->cpus_allowed) && 1392 is_sched_load_balance(cp) && 1393 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || 1394 is_partition_root(cp))) 1395 need_rebuild_sched_domains = true; 1396 1397 rcu_read_lock(); 1398 css_put(&cp->css); 1399 } 1400 rcu_read_unlock(); 1401 1402 if (need_rebuild_sched_domains) 1403 rebuild_sched_domains_locked(); 1404 } 1405 1406 /** 1407 * update_sibling_cpumasks - Update siblings cpumasks 1408 * @parent: Parent cpuset 1409 * @cs: Current cpuset 1410 * @tmp: Temp variables 1411 */ 1412 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, 1413 struct tmpmasks *tmp) 1414 { 1415 struct cpuset *sibling; 1416 struct cgroup_subsys_state *pos_css; 1417 1418 /* 1419 * Check all its siblings and call update_cpumasks_hier() 1420 * if their use_parent_ecpus flag is set in order for them 1421 * to use the right effective_cpus value. 
1422 */ 1423 rcu_read_lock(); 1424 cpuset_for_each_child(sibling, pos_css, parent) { 1425 if (sibling == cs) 1426 continue; 1427 if (!sibling->use_parent_ecpus) 1428 continue; 1429 1430 update_cpumasks_hier(sibling, tmp); 1431 } 1432 rcu_read_unlock(); 1433 } 1434 1435 /** 1436 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 1437 * @cs: the cpuset to consider 1438 * @trialcs: trial cpuset 1439 * @buf: buffer of cpu numbers written to this cpuset 1440 */ 1441 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, 1442 const char *buf) 1443 { 1444 int retval; 1445 struct tmpmasks tmp; 1446 1447 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ 1448 if (cs == &top_cpuset) 1449 return -EACCES; 1450 1451 /* 1452 * An empty cpus_allowed is ok only if the cpuset has no tasks. 1453 * Since cpulist_parse() fails on an empty mask, we special case 1454 * that parsing. The validate_change() call ensures that cpusets 1455 * with tasks have cpus. 1456 */ 1457 if (!*buf) { 1458 cpumask_clear(trialcs->cpus_allowed); 1459 } else { 1460 retval = cpulist_parse(buf, trialcs->cpus_allowed); 1461 if (retval < 0) 1462 return retval; 1463 1464 if (!cpumask_subset(trialcs->cpus_allowed, 1465 top_cpuset.cpus_allowed)) 1466 return -EINVAL; 1467 } 1468 1469 /* Nothing to do if the cpus didn't change */ 1470 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) 1471 return 0; 1472 1473 retval = validate_change(cs, trialcs); 1474 if (retval < 0) 1475 return retval; 1476 1477 #ifdef CONFIG_CPUMASK_OFFSTACK 1478 /* 1479 * Use the cpumasks in trialcs for tmpmasks when they are pointers 1480 * to allocated cpumasks. 1481 */ 1482 tmp.addmask = trialcs->subparts_cpus; 1483 tmp.delmask = trialcs->effective_cpus; 1484 tmp.new_cpus = trialcs->cpus_allowed; 1485 #endif 1486 1487 if (cs->partition_root_state) { 1488 /* Cpumask of a partition root cannot be empty */ 1489 if (cpumask_empty(trialcs->cpus_allowed)) 1490 return -EINVAL; 1491 if (update_parent_subparts_cpumask(cs, partcmd_update, 1492 trialcs->cpus_allowed, &tmp) < 0) 1493 return -EINVAL; 1494 } 1495 1496 spin_lock_irq(&callback_lock); 1497 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); 1498 1499 /* 1500 * Make sure that subparts_cpus is a subset of cpus_allowed. 1501 */ 1502 if (cs->nr_subparts_cpus) { 1503 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, 1504 cs->cpus_allowed); 1505 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); 1506 } 1507 spin_unlock_irq(&callback_lock); 1508 1509 update_cpumasks_hier(cs, &tmp); 1510 1511 if (cs->partition_root_state) { 1512 struct cpuset *parent = parent_cs(cs); 1513 1514 /* 1515 * For partition root, update the cpumasks of sibling 1516 * cpusets if they use parent's effective_cpus. 1517 */ 1518 if (parent->child_ecpus_count) 1519 update_sibling_cpumasks(parent, cs, &tmp); 1520 } 1521 return 0; 1522 } 1523 1524 /* 1525 * Migrate memory region from one set of nodes to another. This is 1526 * performed asynchronously as it can be called from process migration path 1527 * holding locks involved in process management. All mm migrations are 1528 * performed in the queued order and can be waited for by flushing 1529 * cpuset_migrate_mm_wq. 

/*
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from process migration path
 * holding locks involved in process management.  All mm migrations are
 * performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
 */

struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct cpuset_migrate_mm_work *mwork;

	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}

static void cpuset_post_attach(void)
{
	flush_workqueue(cpuset_migrate_mm_wq);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * We use the mems_allowed_seq seqlock to safely update both
 * tsk->mems_allowed and rebind the task's mempolicy, if any.  If the
 * task is allocating in parallel, it might temporarily see an empty
 * intersection, which results in a seqlock check and retry before OOM
 * or allocation failure.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	task_lock(tsk);

	local_irq_disable();
	write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems);
	tsk->mems_allowed = *newmems;

	write_seqcount_end(&tsk->mems_allowed_seq);
	local_irq_enable();

	task_unlock(tsk);
}
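
/*
 * Reader side (illustrative sketch, not part of this file): page
 * allocators bracket their use of current->mems_allowed with
 * read_mems_allowed_begin()/read_mems_allowed_retry(), so a rebind in
 * progress above is retried instead of being observed as a transiently
 * empty nodemask.
 */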

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_nodemask(struct cpuset *cs)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct css_task_iter it;
	struct task_struct *task;

	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */

	guarantee_online_mems(cs, &newmems);

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		struct mm_struct *mm;
		bool migrate;

		cpuset_change_task_nodemask(task, &newmems);

		mm = get_task_mm(task);
		if (!mm)
			continue;

		migrate = is_memory_migrate(cs);

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
		else
			mmput(mm);
	}
	css_task_iter_end(&it);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_mems: a temp variable for calculating new effective_mems
 *
 * When the configured nodemask is changed, the effective nodemasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some MEMs.
		 */
		if (is_in_v2_mode() && nodes_empty(*new_mems))
			*new_mems = parent->effective_mems;

		/* Skip the whole subtree if the nodemask remains the same. */
		if (nodes_equal(*new_mems, cp->effective_mems)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);
		cp->effective_mems = *new_mems;
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!nodes_equal(cp->mems_allowed, cp->effective_mems));

		update_tasks_nodemask(cp);

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpusets mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the tasks' pages to the new memory.
 *
 * Call with cpuset_mutex held. May take callback_lock during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
{
	int retval;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
	if (!*buf) {
		nodes_clear(trialcs->mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs->mems_allowed);
		if (retval < 0)
			goto done;

		if (!nodes_subset(trialcs->mems_allowed,
				  top_cpuset.mems_allowed)) {
			retval = -EINVAL;
			goto done;
		}
	}

	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		goto done;

	spin_lock_irq(&callback_lock);
	cs->mems_allowed = trialcs->mems_allowed;
	spin_unlock_irq(&callback_lock);

	/* use trialcs->mems_allowed as a temp variable */
	update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
	return retval;
}

bool current_cpuset_is_being_rebound(void)
{
	bool ret;

	rcu_read_lock();
	ret = task_cs(current) == cpuset_being_rebound;
	rcu_read_unlock();

	return ret;
}

static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
	if (val < -1 || val >= sched_domain_level_max)
		return -EINVAL;
#endif

	if (val != cs->relax_domain_level) {
		cs->relax_domain_level = val;
		if (!cpumask_empty(cs->cpus_allowed) &&
		    is_sched_load_balance(cs))
			rebuild_sched_domains_locked();
	}

	return 0;
}

/**
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 * @cs: the cpuset in which each task's spread flags need to be changed
 *
 * Iterate through each task of @cs updating its spread flags.  As this
 * function is called with cpuset_mutex held, cpuset membership stays
 * stable.
 */
static void update_tasks_flags(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		cpuset_update_task_spread_flag(cs, task);
	css_task_iter_end(&it);
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:		the bit to update (see cpuset_flagbits_t)
 * cs:		the cpuset to update
 * turning_on:	whether the flag is being set or cleared
 *
 * Call with cpuset_mutex held.
1835 */ 1836 1837 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, 1838 int turning_on) 1839 { 1840 struct cpuset *trialcs; 1841 int balance_flag_changed; 1842 int spread_flag_changed; 1843 int err; 1844 1845 trialcs = alloc_trial_cpuset(cs); 1846 if (!trialcs) 1847 return -ENOMEM; 1848 1849 if (turning_on) 1850 set_bit(bit, &trialcs->flags); 1851 else 1852 clear_bit(bit, &trialcs->flags); 1853 1854 err = validate_change(cs, trialcs); 1855 if (err < 0) 1856 goto out; 1857 1858 balance_flag_changed = (is_sched_load_balance(cs) != 1859 is_sched_load_balance(trialcs)); 1860 1861 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 1862 || (is_spread_page(cs) != is_spread_page(trialcs))); 1863 1864 spin_lock_irq(&callback_lock); 1865 cs->flags = trialcs->flags; 1866 spin_unlock_irq(&callback_lock); 1867 1868 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1869 rebuild_sched_domains_locked(); 1870 1871 if (spread_flag_changed) 1872 update_tasks_flags(cs); 1873 out: 1874 free_cpuset(trialcs); 1875 return err; 1876 } 1877 1878 /* 1879 * update_prstate - update partititon_root_state 1880 * cs: the cpuset to update 1881 * val: 0 - disabled, 1 - enabled 1882 * 1883 * Call with cpuset_mutex held. 1884 */ 1885 static int update_prstate(struct cpuset *cs, int val) 1886 { 1887 int err; 1888 struct cpuset *parent = parent_cs(cs); 1889 struct tmpmasks tmp; 1890 1891 if ((val != 0) && (val != 1)) 1892 return -EINVAL; 1893 if (val == cs->partition_root_state) 1894 return 0; 1895 1896 /* 1897 * Cannot force a partial or invalid partition root to a full 1898 * partition root. 1899 */ 1900 if (val && cs->partition_root_state) 1901 return -EINVAL; 1902 1903 if (alloc_cpumasks(NULL, &tmp)) 1904 return -ENOMEM; 1905 1906 err = -EINVAL; 1907 if (!cs->partition_root_state) { 1908 /* 1909 * Turning on partition root requires setting the 1910 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed 1911 * cannot be NULL. 1912 */ 1913 if (cpumask_empty(cs->cpus_allowed)) 1914 goto out; 1915 1916 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); 1917 if (err) 1918 goto out; 1919 1920 err = update_parent_subparts_cpumask(cs, partcmd_enable, 1921 NULL, &tmp); 1922 if (err) { 1923 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1924 goto out; 1925 } 1926 cs->partition_root_state = PRS_ENABLED; 1927 } else { 1928 /* 1929 * Turning off partition root will clear the 1930 * CS_CPU_EXCLUSIVE bit. 1931 */ 1932 if (cs->partition_root_state == PRS_ERROR) { 1933 cs->partition_root_state = 0; 1934 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1935 err = 0; 1936 goto out; 1937 } 1938 1939 err = update_parent_subparts_cpumask(cs, partcmd_disable, 1940 NULL, &tmp); 1941 if (err) 1942 goto out; 1943 1944 cs->partition_root_state = 0; 1945 1946 /* Turning off CS_CPU_EXCLUSIVE will not return error */ 1947 update_flag(CS_CPU_EXCLUSIVE, cs, 0); 1948 } 1949 1950 /* 1951 * Update cpumask of parent's tasks except when it is the top 1952 * cpuset as some system daemons cannot be mapped to other CPUs. 1953 */ 1954 if (parent != &top_cpuset) 1955 update_tasks_cpumask(parent); 1956 1957 if (parent->child_ecpus_count) 1958 update_sibling_cpumasks(parent, cs, &tmp); 1959 1960 rebuild_sched_domains_locked(); 1961 out: 1962 free_cpumasks(NULL, &tmp); 1963 return err; 1964 } 1965 1966 /* 1967 * Frequency meter - How fast is some event occurring? 1968 * 1969 * These routines manage a digitally filtered, constant time based, 1970 * event frequency meter. 
There are four routines: 1971 * fmeter_init() - initialize a frequency meter. 1972 * fmeter_markevent() - called each time the event happens. 1973 * fmeter_getrate() - returns the recent rate of such events. 1974 * fmeter_update() - internal routine used to update fmeter. 1975 * 1976 * A common data structure is passed to each of these routines, 1977 * which is used to keep track of the state required to manage the 1978 * frequency meter and its digital filter. 1979 * 1980 * The filter works on the number of events marked per unit time. 1981 * The filter is single-pole low-pass recursive (IIR). The time unit 1982 * is 1 second. Arithmetic is done using 32-bit integers scaled to 1983 * simulate 3 decimal digits of precision (multiplied by 1000). 1984 * 1985 * With an FM_COEF of 933, and a time base of 1 second, the filter 1986 * has a half-life of 10 seconds, meaning that if the events quit 1987 * happening, then the rate returned from the fmeter_getrate() 1988 * will be cut in half each 10 seconds, until it converges to zero. 1989 * 1990 * It is not worth doing a real infinitely recursive filter. If more 1991 * than FM_MAXTICKS ticks have elapsed since the last filter event, 1992 * just compute FM_MAXTICKS ticks worth, by which point the level 1993 * will be stable. 1994 * 1995 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid 1996 * arithmetic overflow in the fmeter_update() routine. 1997 * 1998 * Given the simple 32 bit integer arithmetic used, this meter works 1999 * best for reporting rates between one per millisecond (msec) and 2000 * one per 32 (approx) seconds. At constant rates faster than one 2001 * per msec it maxes out at values just under 1,000,000. At constant 2002 * rates between one per msec, and one per second it will stabilize 2003 * to a value N*1000, where N is the rate of events per second. 2004 * At constant rates between one per second and one per 32 seconds, 2005 * it will be choppy, moving up on the seconds that have an event, 2006 * and then decaying until the next event. At rates slower than 2007 * about one in 32 seconds, it decays all the way back to zero between 2008 * each event. 2009 */ 2010 2011 #define FM_COEF 933 /* coefficient for half-life of 10 secs */ 2012 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */ 2013 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */ 2014 #define FM_SCALE 1000 /* faux fixed point scale */ 2015 2016 /* Initialize a frequency meter */ 2017 static void fmeter_init(struct fmeter *fmp) 2018 { 2019 fmp->cnt = 0; 2020 fmp->val = 0; 2021 fmp->time = 0; 2022 spin_lock_init(&fmp->lock); 2023 } 2024 2025 /* Internal meter update - process cnt events and update value */ 2026 static void fmeter_update(struct fmeter *fmp) 2027 { 2028 time64_t now; 2029 u32 ticks; 2030 2031 now = ktime_get_seconds(); 2032 ticks = now - fmp->time; 2033 2034 if (ticks == 0) 2035 return; 2036 2037 ticks = min(FM_MAXTICKS, ticks); 2038 while (ticks-- > 0) 2039 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; 2040 fmp->time = now; 2041 2042 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; 2043 fmp->cnt = 0; 2044 } 2045 2046 /* Process any previous ticks, then bump cnt by one (times scale). */ 2047 static void fmeter_markevent(struct fmeter *fmp) 2048 { 2049 spin_lock(&fmp->lock); 2050 fmeter_update(fmp); 2051 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); 2052 spin_unlock(&fmp->lock); 2053 } 2054 2055 /* Process any previous ticks, then return current value. 
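 *
 * (Illustrative arithmetic, not from the original source: with FM_COEF == 933
 * each elapsed second scales the stored value by 933/1000 = 0.933, and since
 * 0.933^10 is roughly 0.5, that is where the 10 second half-life quoted in
 * the comment block above comes from; a steady rate of N events per second
 * settles near N * FM_SCALE.)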
*/ 2056 static int fmeter_getrate(struct fmeter *fmp) 2057 { 2058 int val; 2059 2060 spin_lock(&fmp->lock); 2061 fmeter_update(fmp); 2062 val = fmp->val; 2063 spin_unlock(&fmp->lock); 2064 return val; 2065 } 2066 2067 static struct cpuset *cpuset_attach_old_cs; 2068 2069 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ 2070 static int cpuset_can_attach(struct cgroup_taskset *tset) 2071 { 2072 struct cgroup_subsys_state *css; 2073 struct cpuset *cs; 2074 struct task_struct *task; 2075 int ret; 2076 2077 /* used later by cpuset_attach() */ 2078 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); 2079 cs = css_cs(css); 2080 2081 mutex_lock(&cpuset_mutex); 2082 2083 /* allow moving tasks into an empty cpuset if on default hierarchy */ 2084 ret = -ENOSPC; 2085 if (!is_in_v2_mode() && 2086 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) 2087 goto out_unlock; 2088 2089 cgroup_taskset_for_each(task, css, tset) { 2090 ret = task_can_attach(task, cs->cpus_allowed); 2091 if (ret) 2092 goto out_unlock; 2093 ret = security_task_setscheduler(task); 2094 if (ret) 2095 goto out_unlock; 2096 } 2097 2098 /* 2099 * Mark attach is in progress. This makes validate_change() fail 2100 * changes which zero cpus/mems_allowed. 2101 */ 2102 cs->attach_in_progress++; 2103 ret = 0; 2104 out_unlock: 2105 mutex_unlock(&cpuset_mutex); 2106 return ret; 2107 } 2108 2109 static void cpuset_cancel_attach(struct cgroup_taskset *tset) 2110 { 2111 struct cgroup_subsys_state *css; 2112 2113 cgroup_taskset_first(tset, &css); 2114 2115 mutex_lock(&cpuset_mutex); 2116 css_cs(css)->attach_in_progress--; 2117 mutex_unlock(&cpuset_mutex); 2118 } 2119 2120 /* 2121 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach() 2122 * but we can't allocate it dynamically there. Define it global and 2123 * allocate from cpuset_init(). 2124 */ 2125 static cpumask_var_t cpus_attach; 2126 2127 static void cpuset_attach(struct cgroup_taskset *tset) 2128 { 2129 /* static buf protected by cpuset_mutex */ 2130 static nodemask_t cpuset_attach_nodemask_to; 2131 struct task_struct *task; 2132 struct task_struct *leader; 2133 struct cgroup_subsys_state *css; 2134 struct cpuset *cs; 2135 struct cpuset *oldcs = cpuset_attach_old_cs; 2136 2137 cgroup_taskset_first(tset, &css); 2138 cs = css_cs(css); 2139 2140 mutex_lock(&cpuset_mutex); 2141 2142 /* prepare for attach */ 2143 if (cs == &top_cpuset) 2144 cpumask_copy(cpus_attach, cpu_possible_mask); 2145 else 2146 guarantee_online_cpus(cs, cpus_attach); 2147 2148 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); 2149 2150 cgroup_taskset_for_each(task, css, tset) { 2151 /* 2152 * can_attach beforehand should guarantee that this doesn't 2153 * fail. TODO: have a better way to handle failure here 2154 */ 2155 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); 2156 2157 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); 2158 cpuset_update_task_spread_flag(cs, task); 2159 } 2160 2161 /* 2162 * Change mm for all threadgroup leaders. This is expensive and may 2163 * sleep and should be moved outside migration path proper. 2164 */ 2165 cpuset_attach_nodemask_to = cs->effective_mems; 2166 cgroup_taskset_for_each_leader(leader, css, tset) { 2167 struct mm_struct *mm = get_task_mm(leader); 2168 2169 if (mm) { 2170 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); 2171 2172 /* 2173 * old_mems_allowed is the same with mems_allowed 2174 * here, except if this task is being moved 2175 * automatically due to hotplug. 
In that case 2176 * @mems_allowed has been updated and is empty, so 2177 * @old_mems_allowed is the right nodesets that we 2178 * migrate mm from. 2179 */ 2180 if (is_memory_migrate(cs)) 2181 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, 2182 &cpuset_attach_nodemask_to); 2183 else 2184 mmput(mm); 2185 } 2186 } 2187 2188 cs->old_mems_allowed = cpuset_attach_nodemask_to; 2189 2190 cs->attach_in_progress--; 2191 if (!cs->attach_in_progress) 2192 wake_up(&cpuset_attach_wq); 2193 2194 mutex_unlock(&cpuset_mutex); 2195 } 2196 2197 /* The various types of files and directories in a cpuset file system */ 2198 2199 typedef enum { 2200 FILE_MEMORY_MIGRATE, 2201 FILE_CPULIST, 2202 FILE_MEMLIST, 2203 FILE_EFFECTIVE_CPULIST, 2204 FILE_EFFECTIVE_MEMLIST, 2205 FILE_SUBPARTS_CPULIST, 2206 FILE_CPU_EXCLUSIVE, 2207 FILE_MEM_EXCLUSIVE, 2208 FILE_MEM_HARDWALL, 2209 FILE_SCHED_LOAD_BALANCE, 2210 FILE_PARTITION_ROOT, 2211 FILE_SCHED_RELAX_DOMAIN_LEVEL, 2212 FILE_MEMORY_PRESSURE_ENABLED, 2213 FILE_MEMORY_PRESSURE, 2214 FILE_SPREAD_PAGE, 2215 FILE_SPREAD_SLAB, 2216 } cpuset_filetype_t; 2217 2218 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, 2219 u64 val) 2220 { 2221 struct cpuset *cs = css_cs(css); 2222 cpuset_filetype_t type = cft->private; 2223 int retval = 0; 2224 2225 mutex_lock(&cpuset_mutex); 2226 if (!is_cpuset_online(cs)) { 2227 retval = -ENODEV; 2228 goto out_unlock; 2229 } 2230 2231 switch (type) { 2232 case FILE_CPU_EXCLUSIVE: 2233 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); 2234 break; 2235 case FILE_MEM_EXCLUSIVE: 2236 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); 2237 break; 2238 case FILE_MEM_HARDWALL: 2239 retval = update_flag(CS_MEM_HARDWALL, cs, val); 2240 break; 2241 case FILE_SCHED_LOAD_BALANCE: 2242 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); 2243 break; 2244 case FILE_MEMORY_MIGRATE: 2245 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); 2246 break; 2247 case FILE_MEMORY_PRESSURE_ENABLED: 2248 cpuset_memory_pressure_enabled = !!val; 2249 break; 2250 case FILE_SPREAD_PAGE: 2251 retval = update_flag(CS_SPREAD_PAGE, cs, val); 2252 break; 2253 case FILE_SPREAD_SLAB: 2254 retval = update_flag(CS_SPREAD_SLAB, cs, val); 2255 break; 2256 default: 2257 retval = -EINVAL; 2258 break; 2259 } 2260 out_unlock: 2261 mutex_unlock(&cpuset_mutex); 2262 return retval; 2263 } 2264 2265 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, 2266 s64 val) 2267 { 2268 struct cpuset *cs = css_cs(css); 2269 cpuset_filetype_t type = cft->private; 2270 int retval = -ENODEV; 2271 2272 mutex_lock(&cpuset_mutex); 2273 if (!is_cpuset_online(cs)) 2274 goto out_unlock; 2275 2276 switch (type) { 2277 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2278 retval = update_relax_domain_level(cs, val); 2279 break; 2280 default: 2281 retval = -EINVAL; 2282 break; 2283 } 2284 out_unlock: 2285 mutex_unlock(&cpuset_mutex); 2286 return retval; 2287 } 2288 2289 /* 2290 * Common handling for a write to a "cpus" or "mems" file. 2291 */ 2292 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, 2293 char *buf, size_t nbytes, loff_t off) 2294 { 2295 struct cpuset *cs = css_cs(of_css(of)); 2296 struct cpuset *trialcs; 2297 int retval = -ENODEV; 2298 2299 buf = strstrip(buf); 2300 2301 /* 2302 * CPU or memory hotunplug may leave @cs w/o any execution 2303 * resources, in which case the hotplug code asynchronously updates 2304 * configuration and transfers all tasks to the nearest ancestor 2305 * which can execute. 
2306 * 2307 * As writes to "cpus" or "mems" may restore @cs's execution 2308 * resources, wait for the previously scheduled operations before 2309 * proceeding, so that we don't end up keep removing tasks added 2310 * after execution capability is restored. 2311 * 2312 * cpuset_hotplug_work calls back into cgroup core via 2313 * cgroup_transfer_tasks() and waiting for it from a cgroupfs 2314 * operation like this one can lead to a deadlock through kernfs 2315 * active_ref protection. Let's break the protection. Losing the 2316 * protection is okay as we check whether @cs is online after 2317 * grabbing cpuset_mutex anyway. This only happens on the legacy 2318 * hierarchies. 2319 */ 2320 css_get(&cs->css); 2321 kernfs_break_active_protection(of->kn); 2322 flush_work(&cpuset_hotplug_work); 2323 2324 mutex_lock(&cpuset_mutex); 2325 if (!is_cpuset_online(cs)) 2326 goto out_unlock; 2327 2328 trialcs = alloc_trial_cpuset(cs); 2329 if (!trialcs) { 2330 retval = -ENOMEM; 2331 goto out_unlock; 2332 } 2333 2334 switch (of_cft(of)->private) { 2335 case FILE_CPULIST: 2336 retval = update_cpumask(cs, trialcs, buf); 2337 break; 2338 case FILE_MEMLIST: 2339 retval = update_nodemask(cs, trialcs, buf); 2340 break; 2341 default: 2342 retval = -EINVAL; 2343 break; 2344 } 2345 2346 free_cpuset(trialcs); 2347 out_unlock: 2348 mutex_unlock(&cpuset_mutex); 2349 kernfs_unbreak_active_protection(of->kn); 2350 css_put(&cs->css); 2351 flush_workqueue(cpuset_migrate_mm_wq); 2352 return retval ?: nbytes; 2353 } 2354 2355 /* 2356 * These ascii lists should be read in a single call, by using a user 2357 * buffer large enough to hold the entire map. If read in smaller 2358 * chunks, there is no guarantee of atomicity. Since the display format 2359 * used, list of ranges of sequential numbers, is variable length, 2360 * and since these maps can change value dynamically, one could read 2361 * gibberish by doing partial reads while a list was changing. 
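 *
 * A minimal userspace sketch of the intended access pattern (illustrative
 * only; the mount path and buffer size are assumptions, not taken from this
 * file), using one large read() so a single consistent snapshot is returned:
 *
 *	char buf[4096];
 *	int fd = open("/sys/fs/cgroup/cpuset/cpuset.cpus", O_RDONLY);
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	close(fd);
 *
 * Issuing several smaller read() calls instead could interleave with a
 * concurrent update and return a mixture of old and new values.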
2362 */ 2363 static int cpuset_common_seq_show(struct seq_file *sf, void *v) 2364 { 2365 struct cpuset *cs = css_cs(seq_css(sf)); 2366 cpuset_filetype_t type = seq_cft(sf)->private; 2367 int ret = 0; 2368 2369 spin_lock_irq(&callback_lock); 2370 2371 switch (type) { 2372 case FILE_CPULIST: 2373 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); 2374 break; 2375 case FILE_MEMLIST: 2376 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); 2377 break; 2378 case FILE_EFFECTIVE_CPULIST: 2379 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); 2380 break; 2381 case FILE_EFFECTIVE_MEMLIST: 2382 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); 2383 break; 2384 case FILE_SUBPARTS_CPULIST: 2385 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); 2386 break; 2387 default: 2388 ret = -EINVAL; 2389 } 2390 2391 spin_unlock_irq(&callback_lock); 2392 return ret; 2393 } 2394 2395 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) 2396 { 2397 struct cpuset *cs = css_cs(css); 2398 cpuset_filetype_t type = cft->private; 2399 switch (type) { 2400 case FILE_CPU_EXCLUSIVE: 2401 return is_cpu_exclusive(cs); 2402 case FILE_MEM_EXCLUSIVE: 2403 return is_mem_exclusive(cs); 2404 case FILE_MEM_HARDWALL: 2405 return is_mem_hardwall(cs); 2406 case FILE_SCHED_LOAD_BALANCE: 2407 return is_sched_load_balance(cs); 2408 case FILE_MEMORY_MIGRATE: 2409 return is_memory_migrate(cs); 2410 case FILE_MEMORY_PRESSURE_ENABLED: 2411 return cpuset_memory_pressure_enabled; 2412 case FILE_MEMORY_PRESSURE: 2413 return fmeter_getrate(&cs->fmeter); 2414 case FILE_SPREAD_PAGE: 2415 return is_spread_page(cs); 2416 case FILE_SPREAD_SLAB: 2417 return is_spread_slab(cs); 2418 default: 2419 BUG(); 2420 } 2421 2422 /* Unreachable but makes gcc happy */ 2423 return 0; 2424 } 2425 2426 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) 2427 { 2428 struct cpuset *cs = css_cs(css); 2429 cpuset_filetype_t type = cft->private; 2430 switch (type) { 2431 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 2432 return cs->relax_domain_level; 2433 default: 2434 BUG(); 2435 } 2436 2437 /* Unrechable but makes gcc happy */ 2438 return 0; 2439 } 2440 2441 static int sched_partition_show(struct seq_file *seq, void *v) 2442 { 2443 struct cpuset *cs = css_cs(seq_css(seq)); 2444 2445 switch (cs->partition_root_state) { 2446 case PRS_ENABLED: 2447 seq_puts(seq, "root\n"); 2448 break; 2449 case PRS_DISABLED: 2450 seq_puts(seq, "member\n"); 2451 break; 2452 case PRS_ERROR: 2453 seq_puts(seq, "root invalid\n"); 2454 break; 2455 } 2456 return 0; 2457 } 2458 2459 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, 2460 size_t nbytes, loff_t off) 2461 { 2462 struct cpuset *cs = css_cs(of_css(of)); 2463 int val; 2464 int retval = -ENODEV; 2465 2466 buf = strstrip(buf); 2467 2468 /* 2469 * Convert "root" to ENABLED, and convert "member" to DISABLED. 
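 * For example, a write of the string "root" to the cgroup v2 file
 * cpuset.cpus.partition arrives here with buf == "root" and is mapped to
 * PRS_ENABLED below, while "member" maps to PRS_DISABLED (illustrative
 * restatement of the code that follows, not part of the original comment).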
2470 */ 2471 if (!strcmp(buf, "root")) 2472 val = PRS_ENABLED; 2473 else if (!strcmp(buf, "member")) 2474 val = PRS_DISABLED; 2475 else 2476 return -EINVAL; 2477 2478 css_get(&cs->css); 2479 mutex_lock(&cpuset_mutex); 2480 if (!is_cpuset_online(cs)) 2481 goto out_unlock; 2482 2483 retval = update_prstate(cs, val); 2484 out_unlock: 2485 mutex_unlock(&cpuset_mutex); 2486 css_put(&cs->css); 2487 return retval ?: nbytes; 2488 } 2489 2490 /* 2491 * for the common functions, 'private' gives the type of file 2492 */ 2493 2494 static struct cftype legacy_files[] = { 2495 { 2496 .name = "cpus", 2497 .seq_show = cpuset_common_seq_show, 2498 .write = cpuset_write_resmask, 2499 .max_write_len = (100U + 6 * NR_CPUS), 2500 .private = FILE_CPULIST, 2501 }, 2502 2503 { 2504 .name = "mems", 2505 .seq_show = cpuset_common_seq_show, 2506 .write = cpuset_write_resmask, 2507 .max_write_len = (100U + 6 * MAX_NUMNODES), 2508 .private = FILE_MEMLIST, 2509 }, 2510 2511 { 2512 .name = "effective_cpus", 2513 .seq_show = cpuset_common_seq_show, 2514 .private = FILE_EFFECTIVE_CPULIST, 2515 }, 2516 2517 { 2518 .name = "effective_mems", 2519 .seq_show = cpuset_common_seq_show, 2520 .private = FILE_EFFECTIVE_MEMLIST, 2521 }, 2522 2523 { 2524 .name = "cpu_exclusive", 2525 .read_u64 = cpuset_read_u64, 2526 .write_u64 = cpuset_write_u64, 2527 .private = FILE_CPU_EXCLUSIVE, 2528 }, 2529 2530 { 2531 .name = "mem_exclusive", 2532 .read_u64 = cpuset_read_u64, 2533 .write_u64 = cpuset_write_u64, 2534 .private = FILE_MEM_EXCLUSIVE, 2535 }, 2536 2537 { 2538 .name = "mem_hardwall", 2539 .read_u64 = cpuset_read_u64, 2540 .write_u64 = cpuset_write_u64, 2541 .private = FILE_MEM_HARDWALL, 2542 }, 2543 2544 { 2545 .name = "sched_load_balance", 2546 .read_u64 = cpuset_read_u64, 2547 .write_u64 = cpuset_write_u64, 2548 .private = FILE_SCHED_LOAD_BALANCE, 2549 }, 2550 2551 { 2552 .name = "sched_relax_domain_level", 2553 .read_s64 = cpuset_read_s64, 2554 .write_s64 = cpuset_write_s64, 2555 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, 2556 }, 2557 2558 { 2559 .name = "memory_migrate", 2560 .read_u64 = cpuset_read_u64, 2561 .write_u64 = cpuset_write_u64, 2562 .private = FILE_MEMORY_MIGRATE, 2563 }, 2564 2565 { 2566 .name = "memory_pressure", 2567 .read_u64 = cpuset_read_u64, 2568 .private = FILE_MEMORY_PRESSURE, 2569 }, 2570 2571 { 2572 .name = "memory_spread_page", 2573 .read_u64 = cpuset_read_u64, 2574 .write_u64 = cpuset_write_u64, 2575 .private = FILE_SPREAD_PAGE, 2576 }, 2577 2578 { 2579 .name = "memory_spread_slab", 2580 .read_u64 = cpuset_read_u64, 2581 .write_u64 = cpuset_write_u64, 2582 .private = FILE_SPREAD_SLAB, 2583 }, 2584 2585 { 2586 .name = "memory_pressure_enabled", 2587 .flags = CFTYPE_ONLY_ON_ROOT, 2588 .read_u64 = cpuset_read_u64, 2589 .write_u64 = cpuset_write_u64, 2590 .private = FILE_MEMORY_PRESSURE_ENABLED, 2591 }, 2592 2593 { } /* terminate */ 2594 }; 2595 2596 /* 2597 * This is currently a minimal set for the default hierarchy. It can be 2598 * expanded later on by migrating more features and control files from v1. 
2599 */ 2600 static struct cftype dfl_files[] = { 2601 { 2602 .name = "cpus", 2603 .seq_show = cpuset_common_seq_show, 2604 .write = cpuset_write_resmask, 2605 .max_write_len = (100U + 6 * NR_CPUS), 2606 .private = FILE_CPULIST, 2607 .flags = CFTYPE_NOT_ON_ROOT, 2608 }, 2609 2610 { 2611 .name = "mems", 2612 .seq_show = cpuset_common_seq_show, 2613 .write = cpuset_write_resmask, 2614 .max_write_len = (100U + 6 * MAX_NUMNODES), 2615 .private = FILE_MEMLIST, 2616 .flags = CFTYPE_NOT_ON_ROOT, 2617 }, 2618 2619 { 2620 .name = "cpus.effective", 2621 .seq_show = cpuset_common_seq_show, 2622 .private = FILE_EFFECTIVE_CPULIST, 2623 }, 2624 2625 { 2626 .name = "mems.effective", 2627 .seq_show = cpuset_common_seq_show, 2628 .private = FILE_EFFECTIVE_MEMLIST, 2629 }, 2630 2631 { 2632 .name = "cpus.partition", 2633 .seq_show = sched_partition_show, 2634 .write = sched_partition_write, 2635 .private = FILE_PARTITION_ROOT, 2636 .flags = CFTYPE_NOT_ON_ROOT, 2637 }, 2638 2639 { 2640 .name = "cpus.subpartitions", 2641 .seq_show = cpuset_common_seq_show, 2642 .private = FILE_SUBPARTS_CPULIST, 2643 .flags = CFTYPE_DEBUG, 2644 }, 2645 2646 { } /* terminate */ 2647 }; 2648 2649 2650 /* 2651 * cpuset_css_alloc - allocate a cpuset css 2652 * cgrp: control group that the new cpuset will be part of 2653 */ 2654 2655 static struct cgroup_subsys_state * 2656 cpuset_css_alloc(struct cgroup_subsys_state *parent_css) 2657 { 2658 struct cpuset *cs; 2659 2660 if (!parent_css) 2661 return &top_cpuset.css; 2662 2663 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 2664 if (!cs) 2665 return ERR_PTR(-ENOMEM); 2666 2667 if (alloc_cpumasks(cs, NULL)) { 2668 kfree(cs); 2669 return ERR_PTR(-ENOMEM); 2670 } 2671 2672 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); 2673 nodes_clear(cs->mems_allowed); 2674 nodes_clear(cs->effective_mems); 2675 fmeter_init(&cs->fmeter); 2676 cs->relax_domain_level = -1; 2677 2678 return &cs->css; 2679 } 2680 2681 static int cpuset_css_online(struct cgroup_subsys_state *css) 2682 { 2683 struct cpuset *cs = css_cs(css); 2684 struct cpuset *parent = parent_cs(cs); 2685 struct cpuset *tmp_cs; 2686 struct cgroup_subsys_state *pos_css; 2687 2688 if (!parent) 2689 return 0; 2690 2691 mutex_lock(&cpuset_mutex); 2692 2693 set_bit(CS_ONLINE, &cs->flags); 2694 if (is_spread_page(parent)) 2695 set_bit(CS_SPREAD_PAGE, &cs->flags); 2696 if (is_spread_slab(parent)) 2697 set_bit(CS_SPREAD_SLAB, &cs->flags); 2698 2699 cpuset_inc(); 2700 2701 spin_lock_irq(&callback_lock); 2702 if (is_in_v2_mode()) { 2703 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 2704 cs->effective_mems = parent->effective_mems; 2705 cs->use_parent_ecpus = true; 2706 parent->child_ecpus_count++; 2707 } 2708 spin_unlock_irq(&callback_lock); 2709 2710 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 2711 goto out_unlock; 2712 2713 /* 2714 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is 2715 * set. This flag handling is implemented in cgroup core for 2716 * histrical reasons - the flag may be specified during mount. 2717 * 2718 * Currently, if any sibling cpusets have exclusive cpus or mem, we 2719 * refuse to clone the configuration - thereby refusing the task to 2720 * be entered, and as a result refusing the sys_unshare() or 2721 * clone() which initiated it. If this becomes a problem for some 2722 * users who wish to allow that scenario, then this could be 2723 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive 2724 * (and likewise for mems) to the new cgroup. 
2725 */ 2726 rcu_read_lock(); 2727 cpuset_for_each_child(tmp_cs, pos_css, parent) { 2728 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { 2729 rcu_read_unlock(); 2730 goto out_unlock; 2731 } 2732 } 2733 rcu_read_unlock(); 2734 2735 spin_lock_irq(&callback_lock); 2736 cs->mems_allowed = parent->mems_allowed; 2737 cs->effective_mems = parent->mems_allowed; 2738 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 2739 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); 2740 spin_unlock_irq(&callback_lock); 2741 out_unlock: 2742 mutex_unlock(&cpuset_mutex); 2743 return 0; 2744 } 2745 2746 /* 2747 * If the cpuset being removed has its flag 'sched_load_balance' 2748 * enabled, then simulate turning sched_load_balance off, which 2749 * will call rebuild_sched_domains_locked(). That is not needed 2750 * in the default hierarchy where only changes in partition 2751 * will cause repartitioning. 2752 * 2753 * If the cpuset has the 'sched.partition' flag enabled, simulate 2754 * turning 'sched.partition' off. 2755 */ 2756 2757 static void cpuset_css_offline(struct cgroup_subsys_state *css) 2758 { 2759 struct cpuset *cs = css_cs(css); 2760 2761 mutex_lock(&cpuset_mutex); 2762 2763 if (is_partition_root(cs)) 2764 update_prstate(cs, 0); 2765 2766 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && 2767 is_sched_load_balance(cs)) 2768 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); 2769 2770 if (cs->use_parent_ecpus) { 2771 struct cpuset *parent = parent_cs(cs); 2772 2773 cs->use_parent_ecpus = false; 2774 parent->child_ecpus_count--; 2775 } 2776 2777 cpuset_dec(); 2778 clear_bit(CS_ONLINE, &cs->flags); 2779 2780 mutex_unlock(&cpuset_mutex); 2781 } 2782 2783 static void cpuset_css_free(struct cgroup_subsys_state *css) 2784 { 2785 struct cpuset *cs = css_cs(css); 2786 2787 free_cpuset(cs); 2788 } 2789 2790 static void cpuset_bind(struct cgroup_subsys_state *root_css) 2791 { 2792 mutex_lock(&cpuset_mutex); 2793 spin_lock_irq(&callback_lock); 2794 2795 if (is_in_v2_mode()) { 2796 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 2797 top_cpuset.mems_allowed = node_possible_map; 2798 } else { 2799 cpumask_copy(top_cpuset.cpus_allowed, 2800 top_cpuset.effective_cpus); 2801 top_cpuset.mems_allowed = top_cpuset.effective_mems; 2802 } 2803 2804 spin_unlock_irq(&callback_lock); 2805 mutex_unlock(&cpuset_mutex); 2806 } 2807 2808 /* 2809 * Make sure the new task conforms to the current state of its parent, 2810 * which could have been changed by cpuset just after it inherits the 2811 * state from the parent and before it sits on the cgroup's task list.
2812 */ 2813 static void cpuset_fork(struct task_struct *task) 2814 { 2815 if (task_css_is_root(task, cpuset_cgrp_id)) 2816 return; 2817 2818 set_cpus_allowed_ptr(task, &current->cpus_allowed); 2819 task->mems_allowed = current->mems_allowed; 2820 } 2821 2822 struct cgroup_subsys cpuset_cgrp_subsys = { 2823 .css_alloc = cpuset_css_alloc, 2824 .css_online = cpuset_css_online, 2825 .css_offline = cpuset_css_offline, 2826 .css_free = cpuset_css_free, 2827 .can_attach = cpuset_can_attach, 2828 .cancel_attach = cpuset_cancel_attach, 2829 .attach = cpuset_attach, 2830 .post_attach = cpuset_post_attach, 2831 .bind = cpuset_bind, 2832 .fork = cpuset_fork, 2833 .legacy_cftypes = legacy_files, 2834 .dfl_cftypes = dfl_files, 2835 .early_init = true, 2836 .threaded = true, 2837 }; 2838 2839 /** 2840 * cpuset_init - initialize cpusets at system boot 2841 * 2842 * Description: Initialize top_cpuset and the cpuset internal file system. 2843 **/ 2844 2845 int __init cpuset_init(void) 2846 { 2847 int err = 0; 2848 2849 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); 2850 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); 2851 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); 2852 2853 cpumask_setall(top_cpuset.cpus_allowed); 2854 nodes_setall(top_cpuset.mems_allowed); 2855 cpumask_setall(top_cpuset.effective_cpus); 2856 nodes_setall(top_cpuset.effective_mems); 2857 2858 fmeter_init(&top_cpuset.fmeter); 2859 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); 2860 top_cpuset.relax_domain_level = -1; 2861 2862 err = register_filesystem(&cpuset_fs_type); 2863 if (err < 0) 2864 return err; 2865 2866 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); 2867 2868 return 0; 2869 } 2870 2871 /* 2872 * If CPU and/or memory hotplug handlers, below, unplug any CPUs 2873 * or memory nodes, we need to walk over the cpuset hierarchy, 2874 * removing that CPU or node from all cpusets. If this removes the 2875 * last CPU or node from a cpuset, then move the tasks in the empty 2876 * cpuset to its next-highest non-empty parent. 2877 */ 2878 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) 2879 { 2880 struct cpuset *parent; 2881 2882 /* 2883 * Find its next-highest non-empty parent (top cpuset 2884 * has online cpus, so can't be empty). 2885 */ 2886 parent = parent_cs(cs); 2887 while (cpumask_empty(parent->cpus_allowed) || 2888 nodes_empty(parent->mems_allowed)) 2889 parent = parent_cs(parent); 2890 2891 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 2892 pr_err("cpuset: failed to transfer tasks out of empty cpuset "); 2893 pr_cont_cgroup_name(cs->css.cgroup); 2894 pr_cont("\n"); 2895 } 2896 } 2897 2898 static void 2899 hotplug_update_tasks_legacy(struct cpuset *cs, 2900 struct cpumask *new_cpus, nodemask_t *new_mems, 2901 bool cpus_updated, bool mems_updated) 2902 { 2903 bool is_empty; 2904 2905 spin_lock_irq(&callback_lock); 2906 cpumask_copy(cs->cpus_allowed, new_cpus); 2907 cpumask_copy(cs->effective_cpus, new_cpus); 2908 cs->mems_allowed = *new_mems; 2909 cs->effective_mems = *new_mems; 2910 spin_unlock_irq(&callback_lock); 2911 2912 /* 2913 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 2914 * as the tasks will be migrated to an ancestor.
2915 */ 2916 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) 2917 update_tasks_cpumask(cs); 2918 if (mems_updated && !nodes_empty(cs->mems_allowed)) 2919 update_tasks_nodemask(cs); 2920 2921 is_empty = cpumask_empty(cs->cpus_allowed) || 2922 nodes_empty(cs->mems_allowed); 2923 2924 mutex_unlock(&cpuset_mutex); 2925 2926 /* 2927 * Move tasks to the nearest ancestor with execution resources, 2928 * This is full cgroup operation which will also call back into 2929 * cpuset. Should be done outside any lock. 2930 */ 2931 if (is_empty) 2932 remove_tasks_in_empty_cpuset(cs); 2933 2934 mutex_lock(&cpuset_mutex); 2935 } 2936 2937 static void 2938 hotplug_update_tasks(struct cpuset *cs, 2939 struct cpumask *new_cpus, nodemask_t *new_mems, 2940 bool cpus_updated, bool mems_updated) 2941 { 2942 if (cpumask_empty(new_cpus)) 2943 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); 2944 if (nodes_empty(*new_mems)) 2945 *new_mems = parent_cs(cs)->effective_mems; 2946 2947 spin_lock_irq(&callback_lock); 2948 cpumask_copy(cs->effective_cpus, new_cpus); 2949 cs->effective_mems = *new_mems; 2950 spin_unlock_irq(&callback_lock); 2951 2952 if (cpus_updated) 2953 update_tasks_cpumask(cs); 2954 if (mems_updated) 2955 update_tasks_nodemask(cs); 2956 } 2957 2958 static bool force_rebuild; 2959 2960 void cpuset_force_rebuild(void) 2961 { 2962 force_rebuild = true; 2963 } 2964 2965 /** 2966 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug 2967 * @cs: cpuset in interest 2968 * @tmp: the tmpmasks structure pointer 2969 * 2970 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone 2971 * offline, update @cs accordingly. If @cs ends up with no CPU or memory, 2972 * all its tasks are moved to the nearest ancestor with both resources. 2973 */ 2974 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) 2975 { 2976 static cpumask_t new_cpus; 2977 static nodemask_t new_mems; 2978 bool cpus_updated; 2979 bool mems_updated; 2980 struct cpuset *parent; 2981 retry: 2982 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); 2983 2984 mutex_lock(&cpuset_mutex); 2985 2986 /* 2987 * We have raced with task attaching. We wait until attaching 2988 * is finished, so we won't attach a task to an empty cpuset. 2989 */ 2990 if (cs->attach_in_progress) { 2991 mutex_unlock(&cpuset_mutex); 2992 goto retry; 2993 } 2994 2995 parent = parent_cs(cs); 2996 compute_effective_cpumask(&new_cpus, cs, parent); 2997 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); 2998 2999 if (cs->nr_subparts_cpus) 3000 /* 3001 * Make sure that CPUs allocated to child partitions 3002 * do not show up in effective_cpus. 3003 */ 3004 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); 3005 3006 if (!tmp || !cs->partition_root_state) 3007 goto update_tasks; 3008 3009 /* 3010 * In the unlikely event that a partition root has empty 3011 * effective_cpus or its parent becomes erroneous, we have to 3012 * transition it to the erroneous state. 3013 */ 3014 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || 3015 (parent->partition_root_state == PRS_ERROR))) { 3016 if (cs->nr_subparts_cpus) { 3017 cs->nr_subparts_cpus = 0; 3018 cpumask_clear(cs->subparts_cpus); 3019 compute_effective_cpumask(&new_cpus, cs, parent); 3020 } 3021 3022 /* 3023 * If the effective_cpus is empty because the child 3024 * partitions take away all the CPUs, we can keep 3025 * the current partition and let the child partitions 3026 * fight for available CPUs. 
3027 */ 3028 if ((parent->partition_root_state == PRS_ERROR) || 3029 cpumask_empty(&new_cpus)) { 3030 update_parent_subparts_cpumask(cs, partcmd_disable, 3031 NULL, tmp); 3032 cs->partition_root_state = PRS_ERROR; 3033 } 3034 cpuset_force_rebuild(); 3035 } 3036 3037 /* 3038 * On the other hand, an erroneous partition root may be transitioned 3039 * back to a regular one or a partition root with no CPU allocated 3040 * from the parent may change to erroneous. 3041 */ 3042 if (is_partition_root(parent) && 3043 ((cs->partition_root_state == PRS_ERROR) || 3044 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) && 3045 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) 3046 cpuset_force_rebuild(); 3047 3048 update_tasks: 3049 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); 3050 mems_updated = !nodes_equal(new_mems, cs->effective_mems); 3051 3052 if (is_in_v2_mode()) 3053 hotplug_update_tasks(cs, &new_cpus, &new_mems, 3054 cpus_updated, mems_updated); 3055 else 3056 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, 3057 cpus_updated, mems_updated); 3058 3059 mutex_unlock(&cpuset_mutex); 3060 } 3061 3062 /** 3063 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset 3064 * 3065 * This function is called after either CPU or memory configuration has 3066 * changed and updates cpuset accordingly. The top_cpuset is always 3067 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in 3068 * order to make cpusets transparent (of no effect) on systems that are 3069 * actively using CPU hotplug but making no active use of cpusets. 3070 * 3071 * Non-root cpusets are only affected by offlining. If any CPUs or memory 3072 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on 3073 * all descendants. 3074 * 3075 * Note that CPU offlining during suspend is ignored. We don't modify 3076 * cpusets across suspend/resume cycles at all. 3077 */ 3078 static void cpuset_hotplug_workfn(struct work_struct *work) 3079 { 3080 static cpumask_t new_cpus; 3081 static nodemask_t new_mems; 3082 bool cpus_updated, mems_updated; 3083 bool on_dfl = is_in_v2_mode(); 3084 struct tmpmasks tmp, *ptmp = NULL; 3085 3086 if (on_dfl && !alloc_cpumasks(NULL, &tmp)) 3087 ptmp = &tmp; 3088 3089 mutex_lock(&cpuset_mutex); 3090 3091 /* fetch the available cpus/mems and find out which changed how */ 3092 cpumask_copy(&new_cpus, cpu_active_mask); 3093 new_mems = node_states[N_MEMORY]; 3094 3095 /* 3096 * If subparts_cpus is populated, it is likely that the check below 3097 * will produce a false positive on cpus_updated when the cpu list 3098 * isn't changed. It is extra work, but it is better to be safe. 3099 */ 3100 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus); 3101 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); 3102 3103 /* synchronize cpus_allowed to cpu_active_mask */ 3104 if (cpus_updated) { 3105 spin_lock_irq(&callback_lock); 3106 if (!on_dfl) 3107 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 3108 /* 3109 * Make sure that CPUs allocated to child partitions 3110 * do not show up in effective_cpus. If no CPU is left, 3111 * we clear the subparts_cpus & let the child partitions 3112 * fight for the CPUs again.
3113 */ 3114 if (top_cpuset.nr_subparts_cpus) { 3115 if (cpumask_subset(&new_cpus, 3116 top_cpuset.subparts_cpus)) { 3117 top_cpuset.nr_subparts_cpus = 0; 3118 cpumask_clear(top_cpuset.subparts_cpus); 3119 } else { 3120 cpumask_andnot(&new_cpus, &new_cpus, 3121 top_cpuset.subparts_cpus); 3122 } 3123 } 3124 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 3125 spin_unlock_irq(&callback_lock); 3126 /* we don't mess with cpumasks of tasks in top_cpuset */ 3127 } 3128 3129 /* synchronize mems_allowed to N_MEMORY */ 3130 if (mems_updated) { 3131 spin_lock_irq(&callback_lock); 3132 if (!on_dfl) 3133 top_cpuset.mems_allowed = new_mems; 3134 top_cpuset.effective_mems = new_mems; 3135 spin_unlock_irq(&callback_lock); 3136 update_tasks_nodemask(&top_cpuset); 3137 } 3138 3139 mutex_unlock(&cpuset_mutex); 3140 3141 /* if cpus or mems changed, we need to propagate to descendants */ 3142 if (cpus_updated || mems_updated) { 3143 struct cpuset *cs; 3144 struct cgroup_subsys_state *pos_css; 3145 3146 rcu_read_lock(); 3147 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { 3148 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) 3149 continue; 3150 rcu_read_unlock(); 3151 3152 cpuset_hotplug_update_tasks(cs, ptmp); 3153 3154 rcu_read_lock(); 3155 css_put(&cs->css); 3156 } 3157 rcu_read_unlock(); 3158 } 3159 3160 /* rebuild sched domains if cpus_allowed has changed */ 3161 if (cpus_updated || force_rebuild) { 3162 force_rebuild = false; 3163 rebuild_sched_domains(); 3164 } 3165 3166 free_cpumasks(NULL, ptmp); 3167 } 3168 3169 void cpuset_update_active_cpus(void) 3170 { 3171 /* 3172 * We're inside cpu hotplug critical region which usually nests 3173 * inside cgroup synchronization. Bounce actual hotplug processing 3174 * to a work item to avoid reverse locking order. 3175 */ 3176 schedule_work(&cpuset_hotplug_work); 3177 } 3178 3179 void cpuset_wait_for_hotplug(void) 3180 { 3181 flush_work(&cpuset_hotplug_work); 3182 } 3183 3184 /* 3185 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. 3186 * Call this routine anytime after node_states[N_MEMORY] changes. 3187 * See cpuset_update_active_cpus() for CPU hotplug handling. 3188 */ 3189 static int cpuset_track_online_nodes(struct notifier_block *self, 3190 unsigned long action, void *arg) 3191 { 3192 schedule_work(&cpuset_hotplug_work); 3193 return NOTIFY_OK; 3194 } 3195 3196 static struct notifier_block cpuset_track_online_nodes_nb = { 3197 .notifier_call = cpuset_track_online_nodes, 3198 .priority = 10, /* ??! */ 3199 }; 3200 3201 /** 3202 * cpuset_init_smp - initialize cpus_allowed 3203 * 3204 * Description: Finish top cpuset after cpu, node maps are initialized 3205 */ 3206 void __init cpuset_init_smp(void) 3207 { 3208 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); 3209 top_cpuset.mems_allowed = node_states[N_MEMORY]; 3210 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; 3211 3212 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); 3213 top_cpuset.effective_mems = node_states[N_MEMORY]; 3214 3215 register_hotmemory_notifier(&cpuset_track_online_nodes_nb); 3216 3217 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); 3218 BUG_ON(!cpuset_migrate_mm_wq); 3219 } 3220 3221 /** 3222 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 3223 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 3224 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. 
3225 * 3226 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset 3227 * attached to the specified @tsk. Guaranteed to return some non-empty 3228 * subset of cpu_online_mask, even if this means going outside the 3229 * tasks cpuset. 3230 **/ 3231 3232 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 3233 { 3234 unsigned long flags; 3235 3236 spin_lock_irqsave(&callback_lock, flags); 3237 rcu_read_lock(); 3238 guarantee_online_cpus(task_cs(tsk), pmask); 3239 rcu_read_unlock(); 3240 spin_unlock_irqrestore(&callback_lock, flags); 3241 } 3242 3243 void cpuset_cpus_allowed_fallback(struct task_struct *tsk) 3244 { 3245 rcu_read_lock(); 3246 do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus); 3247 rcu_read_unlock(); 3248 3249 /* 3250 * We own tsk->cpus_allowed, nobody can change it under us. 3251 * 3252 * But we used cs && cs->cpus_allowed lockless and thus can 3253 * race with cgroup_attach_task() or update_cpumask() and get 3254 * the wrong tsk->cpus_allowed. However, both cases imply the 3255 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() 3256 * which takes task_rq_lock(). 3257 * 3258 * If we are called after it dropped the lock we must see all 3259 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily 3260 * set any mask even if it is not right from task_cs() pov, 3261 * the pending set_cpus_allowed_ptr() will fix things. 3262 * 3263 * select_fallback_rq() will fix things up and set cpu_possible_mask 3264 * if required. 3265 */ 3266 } 3267 3268 void __init cpuset_init_current_mems_allowed(void) 3269 { 3270 nodes_setall(current->mems_allowed); 3271 } 3272 3273 /** 3274 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. 3275 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. 3276 * 3277 * Description: Returns the nodemask_t mems_allowed of the cpuset 3278 * attached to the specified @tsk. Guaranteed to return some non-empty 3279 * subset of node_states[N_MEMORY], even if this means going outside the 3280 * tasks cpuset. 3281 **/ 3282 3283 nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 3284 { 3285 nodemask_t mask; 3286 unsigned long flags; 3287 3288 spin_lock_irqsave(&callback_lock, flags); 3289 rcu_read_lock(); 3290 guarantee_online_mems(task_cs(tsk), &mask); 3291 rcu_read_unlock(); 3292 spin_unlock_irqrestore(&callback_lock, flags); 3293 3294 return mask; 3295 } 3296 3297 /** 3298 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed 3299 * @nodemask: the nodemask to be checked 3300 * 3301 * Are any of the nodes in the nodemask allowed in current->mems_allowed? 3302 */ 3303 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) 3304 { 3305 return nodes_intersects(*nodemask, current->mems_allowed); 3306 } 3307 3308 /* 3309 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 3310 * mem_hardwall ancestor to the specified cpuset. Call holding 3311 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall 3312 * (an unusual configuration), then returns the root cpuset. 3313 */ 3314 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 3315 { 3316 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) 3317 cs = parent_cs(cs); 3318 return cs; 3319 } 3320 3321 /** 3322 * cpuset_node_allowed - Can we allocate on a memory node? 3323 * @node: is this an allowed node? 3324 * @gfp_mask: memory allocation flags 3325 * 3326 * If we're in interrupt, yes, we can always allocate.
If @node is set in 3327 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this 3328 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, 3329 * yes. If current has access to memory reserves as an oom victim, yes. 3330 * Otherwise, no. 3331 * 3332 * GFP_USER allocations are marked with the __GFP_HARDWALL bit, 3333 * and do not allow allocations outside the current tasks cpuset 3334 * unless the task has been OOM killed. 3335 * GFP_KERNEL allocations are not so marked, so can escape to the 3336 * nearest enclosing hardwalled ancestor cpuset. 3337 * 3338 * Scanning up parent cpusets requires callback_lock. The 3339 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit 3340 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 3341 * current tasks mems_allowed came up empty on the first pass over 3342 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 3343 * cpuset are short of memory, might require taking the callback_lock. 3344 * 3345 * The first call here from mm/page_alloc:get_page_from_freelist() 3346 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 3347 * so no allocation on a node outside the cpuset is allowed (unless 3348 * in interrupt, of course). 3349 * 3350 * The second pass through get_page_from_freelist() doesn't even call 3351 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() 3352 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set 3353 * in alloc_flags. That logic and the checks below have the combined 3354 * effect that: 3355 * in_interrupt - any node ok (current task context irrelevant) 3356 * GFP_ATOMIC - any node ok 3357 * tsk_is_oom_victim - any node ok 3358 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 3359 * GFP_USER - only nodes in current tasks mems allowed ok. 3360 */ 3361 bool __cpuset_node_allowed(int node, gfp_t gfp_mask) 3362 { 3363 struct cpuset *cs; /* current cpuset ancestors */ 3364 int allowed; /* is allocation in zone z allowed? */ 3365 unsigned long flags; 3366 3367 if (in_interrupt()) 3368 return true; 3369 if (node_isset(node, current->mems_allowed)) 3370 return true; 3371 /* 3372 * Allow tasks that have access to memory reserves because they have 3373 * been OOM killed to get memory anywhere.
3374 */ 3375 if (unlikely(tsk_is_oom_victim(current))) 3376 return true; 3377 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ 3378 return false; 3379 3380 if (current->flags & PF_EXITING) /* Let dying task have memory */ 3381 return true; 3382 3383 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 3384 spin_lock_irqsave(&callback_lock, flags); 3385 3386 rcu_read_lock(); 3387 cs = nearest_hardwall_ancestor(task_cs(current)); 3388 allowed = node_isset(node, cs->mems_allowed); 3389 rcu_read_unlock(); 3390 3391 spin_unlock_irqrestore(&callback_lock, flags); 3392 return allowed; 3393 } 3394 3395 /** 3396 * cpuset_mem_spread_node() - On which node to begin search for a file page 3397 * cpuset_slab_spread_node() - On which node to begin search for a slab page 3398 * 3399 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for 3400 * tasks in a cpuset with is_spread_page or is_spread_slab set), 3401 * and if the memory allocation used cpuset_mem_spread_node() 3402 * to determine on which node to start looking, as it will for 3403 * certain page cache or slab cache pages such as used for file 3404 * system buffers and inode caches, then instead of starting on the 3405 * local node to look for a free page, rather spread the starting 3406 * node around the tasks mems_allowed nodes. 3407 * 3408 * We don't have to worry about the returned node being offline 3409 * because "it can't happen", and even if it did, it would be ok. 3410 * 3411 * The routines calling guarantee_online_mems() are careful to 3412 * only set nodes in task->mems_allowed that are online. So it 3413 * should not be possible for the following code to return an 3414 * offline node. But if it did, that would be ok, as this routine 3415 * is not returning the node where the allocation must be, only 3416 * the node where the search should start. The zonelist passed to 3417 * __alloc_pages() will include all nodes. If the slab allocator 3418 * is passed an offline node, it will fall back to the local node. 3419 * See kmem_cache_alloc_node(). 3420 */ 3421 3422 static int cpuset_spread_node(int *rotor) 3423 { 3424 return *rotor = next_node_in(*rotor, current->mems_allowed); 3425 } 3426 3427 int cpuset_mem_spread_node(void) 3428 { 3429 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) 3430 current->cpuset_mem_spread_rotor = 3431 node_random(&current->mems_allowed); 3432 3433 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); 3434 } 3435 3436 int cpuset_slab_spread_node(void) 3437 { 3438 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) 3439 current->cpuset_slab_spread_rotor = 3440 node_random(&current->mems_allowed); 3441 3442 return cpuset_spread_node(&current->cpuset_slab_spread_rotor); 3443 } 3444 3445 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); 3446 3447 /** 3448 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? 3449 * @tsk1: pointer to task_struct of some task. 3450 * @tsk2: pointer to task_struct of some other task. 3451 * 3452 * Description: Return true if @tsk1's mems_allowed intersects the 3453 * mems_allowed of @tsk2. Used by the OOM killer to determine if 3454 * one of the task's memory usage might impact the memory available 3455 * to the other.
3456 **/ 3457 3458 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 3459 const struct task_struct *tsk2) 3460 { 3461 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); 3462 } 3463 3464 /** 3465 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed 3466 * 3467 * Description: Prints current's name, cpuset name, and cached copy of its 3468 * mems_allowed to the kernel log. 3469 */ 3470 void cpuset_print_current_mems_allowed(void) 3471 { 3472 struct cgroup *cgrp; 3473 3474 rcu_read_lock(); 3475 3476 cgrp = task_cs(current)->css.cgroup; 3477 pr_cont(",cpuset="); 3478 pr_cont_cgroup_name(cgrp); 3479 pr_cont(",mems_allowed=%*pbl", 3480 nodemask_pr_args(&current->mems_allowed)); 3481 3482 rcu_read_unlock(); 3483 } 3484 3485 /* 3486 * Collection of memory_pressure is suppressed unless 3487 * this flag is enabled by writing "1" to the special 3488 * cpuset file 'memory_pressure_enabled' in the root cpuset. 3489 */ 3490 3491 int cpuset_memory_pressure_enabled __read_mostly; 3492 3493 /** 3494 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. 3495 * 3496 * Keep a running average of the rate of synchronous (direct) 3497 * page reclaim efforts initiated by tasks in each cpuset. 3498 * 3499 * This represents the rate at which some task in the cpuset 3500 * ran low on memory on all nodes it was allowed to use, and 3501 * had to enter the kernel's page reclaim code in an effort to 3502 * create more free memory by tossing clean pages or swapping 3503 * or writing dirty pages. 3504 * 3505 * Display to user space in the per-cpuset read-only file 3506 * "memory_pressure". Value displayed is an integer 3507 * representing the recent rate of entry into the synchronous 3508 * (direct) page reclaim by any task attached to the cpuset. 3509 **/ 3510 3511 void __cpuset_memory_pressure_bump(void) 3512 { 3513 rcu_read_lock(); 3514 fmeter_markevent(&task_cs(current)->fmeter); 3515 rcu_read_unlock(); 3516 } 3517 3518 #ifdef CONFIG_PROC_PID_CPUSET 3519 /* 3520 * proc_cpuset_show() 3521 * - Print tasks cpuset path into seq_file. 3522 * - Used for /proc/<pid>/cpuset. 3523 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it 3524 * doesn't really matter if tsk->cpuset changes after we read it, 3525 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 3526 * anyway. 3527 */ 3528 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 3529 struct pid *pid, struct task_struct *tsk) 3530 { 3531 char *buf; 3532 struct cgroup_subsys_state *css; 3533 int retval; 3534 3535 retval = -ENOMEM; 3536 buf = kmalloc(PATH_MAX, GFP_KERNEL); 3537 if (!buf) 3538 goto out; 3539 3540 css = task_get_css(tsk, cpuset_cgrp_id); 3541 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX, 3542 current->nsproxy->cgroup_ns); 3543 css_put(css); 3544 if (retval >= PATH_MAX) 3545 retval = -ENAMETOOLONG; 3546 if (retval < 0) 3547 goto out_free; 3548 seq_puts(m, buf); 3549 seq_putc(m, '\n'); 3550 retval = 0; 3551 out_free: 3552 kfree(buf); 3553 out: 3554 return retval; 3555 } 3556 #endif /* CONFIG_PROC_PID_CPUSET */ 3557 3558 /* Display task mems_allowed in /proc/<pid>/status file. */ 3559 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 3560 { 3561 seq_printf(m, "Mems_allowed:\t%*pb\n", 3562 nodemask_pr_args(&task->mems_allowed)); 3563 seq_printf(m, "Mems_allowed_list:\t%*pbl\n", 3564 nodemask_pr_args(&task->mems_allowed)); 3565 } 3566
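/*
 * Example (illustrative only, not part of the kernel build): a small
 * userspace program that exercises two of the interfaces implemented
 * above -- the /proc/<pid>/cpuset path printed by proc_cpuset_show()
 * and the Mems_allowed lines that cpuset_task_status_allowed() adds to
 * /proc/<pid>/status.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/self/cpuset", "r");
 *
 *		if (f) {
 *			if (fgets(line, sizeof(line), f))
 *				printf("cpuset path: %s", line);
 *			fclose(f);
 *		}
 *
 *		f = fopen("/proc/self/status", "r");
 *		if (f) {
 *			while (fgets(line, sizeof(line), f))
 *				if (!strncmp(line, "Mems_allowed_list:", 18))
 *					printf("%s", line);
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */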