7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
24 #include "cgroup-internal.h"
53 * node binding, add this key to provide a quick low-cost judgment
99 * The user-configured masks can only be changed by writing to
113 * The user-configured masks are always the same as the effective masks.
116 /* user-configured CPUs and Memory Nodes allowed to tasks */
125 * CPUs allocated to child sub-partitions (default hierarchy only)
126 * - CPUs granted by the parent = effective_cpus U subparts_cpus
127 * - effective_cpus and subparts_cpus are mutually exclusive.
137 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
138 * - A new cpuset's old_mems_allowed is initialized when some
140 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
150 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
168 * use_parent_ecpus - set if using parent's effective_cpus
169 * child_ecpus_count - # of children with use_parent_ecpus set
192 * 0 - member (not a partition root)
193 * 1 - partition root
194 * 2 - partition root without load balancing (isolated)
195 * -1 - invalid partition root
196 * -2 - invalid isolated partition root
201 #define PRS_INVALID_ROOT -1
202 #define PRS_INVALID_ISOLATED -2
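A minimal standalone sketch of the state encoding listed above (only the two PRS_INVALID_* macros appear in this excerpt, so the names of the positive states are assumptions): valid states are positive, and a valid state becomes its invalid counterpart by negation, which is what make_partition_invalid() and the is_partition_valid()/is_partition_invalid() sign tests further down rely on.

	#include <stdbool.h>
	#include <stdio.h>

	enum prs_state {
		PRS_MEMBER           =  0,	/* not a partition root */
		PRS_ROOT             =  1,	/* partition root */
		PRS_ISOLATED         =  2,	/* partition root, no load balancing */
		PRS_INVALID_ROOT     = -1,	/* invalid partition root */
		PRS_INVALID_ISOLATED = -2,	/* invalid isolated partition root */
	};

	static bool prs_valid(int prs)   { return prs > 0; }	/* mirrors is_partition_valid() */
	static bool prs_invalid(int prs) { return prs < 0; }	/* mirrors is_partition_invalid() */

	int main(void)
	{
		int prs = PRS_ISOLATED;

		prs = -prs;	/* the negation used by make_partition_invalid() */
		printf("valid=%d invalid=%d\n", prs_valid(prs), prs_invalid(prs));	/* valid=0 invalid=1 */
		return 0;
	}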
231 return css_cs(cs->css.parent); in parent_cs()
238 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
245 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
278 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
293 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
298 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
303 return cs->partition_root_state > 0; in is_partition_valid()
308 return cs->partition_root_state < 0; in is_partition_invalid()
317 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
325 if (old_prs == cs->partition_root_state) in notify_partition_change()
327 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
331 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
341 * cpuset_for_each_child - traverse online children of a cpuset
350 css_for_each_child((pos_css), &(parent_cs)->css) \
354 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
365 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
369 * There are two global locks guarding cpuset structures - cpuset_mutex and
375 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
392 * If a task is only holding callback_lock, then it has read-only
400 * small pieces of code, such as when reading out possibly multi-word
453 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); in is_in_v2_mode()
457 * partition_is_populated - check if partition has tasks
463 * be non-NULL when this cpuset is going to become a partition itself.
471 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
473 if (!excluded_child && !cs->nr_subparts_cpus) in partition_is_populated()
474 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
482 if (cgroup_is_populated(child->css.cgroup)) { in partition_is_populated()
497 * One way or another, we guarantee to return some non-empty subset
514 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
527 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
539 * One way or another, we guarantee to return some non-empty subset
546 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
548 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
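A user-space sketch of the walk-up pattern used by both guarantee_online_cpus() and guarantee_online_mems() above. Plain bitmasks stand in for cpumask/nodemask, an index-based parent table stands in for the cpuset hierarchy, and all values are hypothetical: climb toward the root until the effective mask intersects the online set, then return the intersection, which is non-empty as long as the top cpuset tracks everything online.

	#include <stdio.h>

	struct cs_sketch {
		unsigned long effective_mems;
		int parent;			/* index of parent, -1 at the root */
	};

	static unsigned long online_mems(const struct cs_sketch *tab, int i,
					 unsigned long n_memory)
	{
		while (!(tab[i].effective_mems & n_memory) && tab[i].parent >= 0)
			i = tab[i].parent;	/* climb until we intersect online nodes */
		return tab[i].effective_mems & n_memory;
	}

	int main(void)
	{
		struct cs_sketch tab[] = {
			{ .effective_mems = 0x0f, .parent = -1 },	/* root: all nodes */
			{ .effective_mems = 0x0c, .parent = 0 },
			{ .effective_mems = 0x08, .parent = 1 },	/* node 3 only */
		};
		unsigned long n_memory = 0x07;	/* pretend node 3 went offline */

		/* cpuset 2 no longer intersects online memory; fall back to its parent */
		printf("%#lx\n", online_mems(tab, 2, n_memory));	/* prints 0x4 */
		return 0;
	}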
575 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
584 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) && in is_cpuset_subset()
585 nodes_subset(p->mems_allowed, q->mems_allowed) && in is_cpuset_subset()
591 * alloc_cpumasks - allocate three cpumasks for cpuset
594 * Return: 0 if successful, -ENOMEM otherwise.
596 * Only one of the two input arguments should be non-NULL.
603 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
604 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
605 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
607 pmask1 = &tmp->new_cpus; in alloc_cpumasks()
608 pmask2 = &tmp->addmask; in alloc_cpumasks()
609 pmask3 = &tmp->delmask; in alloc_cpumasks()
613 return -ENOMEM; in alloc_cpumasks()
627 return -ENOMEM; in alloc_cpumasks()
631 * free_cpumasks - free cpumasks in a tmpmasks structure
638 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
639 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
640 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
643 free_cpumask_var(tmp->new_cpus); in free_cpumasks()
644 free_cpumask_var(tmp->addmask); in free_cpumasks()
645 free_cpumask_var(tmp->delmask); in free_cpumasks()
650 * alloc_trial_cpuset - allocate a trial cpuset
666 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
667 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
672 * free_cpuset - free the cpuset
682 * validate_change_legacy() - Validate conditions specific to legacy (v1)
694 ret = -EBUSY; in validate_change_legacy()
700 ret = -EACCES; in validate_change_legacy()
711 * validate_change() - Used to validate that any proposed cpuset change
719 * 'cur' is the address of an actual, in-use cpuset. Operations
727 * Return 0 if valid, -errno if not.
750 * Cpusets with tasks - existing or newly being attached - can't in validate_change()
753 ret = -ENOSPC; in validate_change()
754 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { in validate_change()
755 if (!cpumask_empty(cur->cpus_allowed) && in validate_change()
756 cpumask_empty(trial->cpus_allowed)) in validate_change()
758 if (!nodes_empty(cur->mems_allowed) && in validate_change()
759 nodes_empty(trial->mems_allowed)) in validate_change()
767 ret = -EBUSY; in validate_change()
769 !cpuset_cpumask_can_shrink(cur->cpus_allowed, in validate_change()
770 trial->cpus_allowed)) in validate_change()
777 ret = -EINVAL; in validate_change()
781 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed)) in validate_change()
785 nodes_intersects(trial->mems_allowed, c->mems_allowed)) in validate_change()
802 return cpumask_intersects(a->effective_cpus, b->effective_cpus); in cpusets_overlap()
808 if (dattr->relax_domain_level < c->relax_domain_level) in update_domain_attr()
809 dattr->relax_domain_level = c->relax_domain_level; in update_domain_attr()
822 if (cpumask_empty(cp->cpus_allowed)) { in update_domain_attr_tree()
836 /* jump label reference count + the top-level cpuset */ in nr_cpusets()
844 * A 'partial partition' is a set of non-overlapping subsets whose
851 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
862 * cp - cpuset pointer, used (together with pos_css) to perform a
863 * top-down scan of all cpusets. For our purposes, rebuilding
866 * csa - (for CpuSet Array) Array of pointers to all the cpusets
873 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
896 struct cpuset *cp; /* top-down scan of cpusets */ in generate_sched_domains()
948 * If root is load-balancing, we can skip @cp if it in generate_sched_domains()
951 if (!cpumask_empty(cp->cpus_allowed) && in generate_sched_domains()
953 cpumask_intersects(cp->cpus_allowed, in generate_sched_domains()
958 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus)) in generate_sched_domains()
962 !cpumask_empty(cp->effective_cpus)) in generate_sched_domains()
972 csa[i]->pn = i; in generate_sched_domains()
979 int apn = a->pn; in generate_sched_domains()
983 int bpn = b->pn; in generate_sched_domains()
989 if (c->pn == bpn) in generate_sched_domains()
990 c->pn = apn; in generate_sched_domains()
992 ndoms--; /* one less element */ in generate_sched_domains()
1016 int apn = a->pn; in generate_sched_domains()
1030 warnings--; in generate_sched_domains()
1041 if (apn == b->pn) { in generate_sched_domains()
1042 cpumask_or(dp, dp, b->effective_cpus); in generate_sched_domains()
1048 b->pn = -1; in generate_sched_domains()
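A condensed user-space sketch of the partition-merging pass shown above (plain unsigned-long masks stand in for cpumasks and the values are hypothetical): every cpuset starts in its own candidate domain, any two candidates whose CPU masks overlap are folded into one partition number, and the scan restarts after each merge the way the kernel loop does, so the surviving domains are pairwise disjoint.

	#include <stdio.h>

	#define NR 4

	int main(void)
	{
		unsigned long mask[NR] = { 0x03, 0x06, 0x30, 0xc0 };	/* per-cpuset CPU masks */
		int pn[NR];						/* partition number, as in csa[i]->pn */
		int ndoms = NR;

		for (int i = 0; i < NR; i++)
			pn[i] = i;
	restart:
		for (int i = 0; i < NR; i++) {
			for (int j = 0; j < NR; j++) {
				if (pn[i] != pn[j] && (mask[i] & mask[j])) {
					int old = pn[j];

					for (int k = 0; k < NR; k++)
						if (pn[k] == old)
							pn[k] = pn[i];	/* fold j's group into i's */
					ndoms--;
					goto restart;	/* pn values changed; rescan */
				}
			}
		}
		printf("%d sched domains\n", ndoms);	/* 0x03 and 0x06 overlap: prints 3 */
		return 0;
	}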
1075 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
1078 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
1105 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
1110 css_get(&cs->css); in dl_rebuild_rd_accounting()
1117 css_put(&cs->css); in dl_rebuild_rd_accounting()
1135 * If the flag 'sched_load_balance' of any cpuset with non-empty
1137 * which has that flag enabled, or if any cpuset with a non-empty
1179 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1210 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1226 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1236 if (task->flags & PF_NO_SETAFFINITY) in update_tasks_cpumask()
1238 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus); in update_tasks_cpumask()
1240 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in update_tasks_cpumask()
1248 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1261 if (parent->nr_subparts_cpus && is_partition_valid(cs)) { in compute_effective_cpumask()
1262 cpumask_or(new_cpus, parent->effective_cpus, in compute_effective_cpumask()
1263 parent->subparts_cpus); in compute_effective_cpumask()
1264 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1267 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1314 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1330 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1332 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1340 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1347 * For partcmd_enable, the cpuset is being transformed from a non-partition
1354 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1398 if (!newmask && cpumask_empty(cs->cpus_allowed)) in update_parent_subparts_cpumask()
1406 old_prs = new_prs = cs->partition_root_state; in update_parent_subparts_cpumask()
1412 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed)) in update_parent_subparts_cpumask()
1419 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) && in update_parent_subparts_cpumask()
1423 cpumask_copy(tmp->addmask, cs->cpus_allowed); in update_parent_subparts_cpumask()
1431 cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1432 parent->subparts_cpus); in update_parent_subparts_cpumask()
1441 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1442 parent->subparts_cpus); in update_parent_subparts_cpumask()
1444 new_prs = -old_prs; in update_parent_subparts_cpumask()
1453 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus in update_parent_subparts_cpumask()
1454 * addmask = newmask & parent->cpus_allowed in update_parent_subparts_cpumask()
1455 * & ~parent->subparts_cpus in update_parent_subparts_cpumask()
1457 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1458 deleting = cpumask_and(tmp->delmask, tmp->delmask, in update_parent_subparts_cpumask()
1459 parent->subparts_cpus); in update_parent_subparts_cpumask()
1461 cpumask_and(tmp->addmask, newmask, parent->cpus_allowed); in update_parent_subparts_cpumask()
1462 adding = cpumask_andnot(tmp->addmask, tmp->addmask, in update_parent_subparts_cpumask()
1463 parent->subparts_cpus); in update_parent_subparts_cpumask()
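As a worked illustration of the partcmd_update formulas above (all values hypothetical): if cs->cpus_allowed is 0-3, parent->subparts_cpus is 0-3 and the new mask is 0-1, then delmask = (0-3 & ~(0-1)) & (0-3) = 2-3, the CPUs handed back to the parent's effective_cpus, while addmask = (0-1 & parent->cpus_allowed) & ~(0-3) is empty because CPUs 0-1 are already accounted for in subparts_cpus.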
1474 cpumask_subset(parent->effective_cpus, tmp->addmask) && in update_parent_subparts_cpumask()
1475 !cpumask_intersects(tmp->delmask, cpu_active_mask) && in update_parent_subparts_cpumask()
1479 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1480 parent->subparts_cpus); in update_parent_subparts_cpumask()
1486 * delmask = cpus_allowed & parent->subparts_cpus in update_parent_subparts_cpumask()
1487 * addmask = cpus_allowed & parent->cpus_allowed in update_parent_subparts_cpumask()
1488 * & ~parent->subparts_cpus in update_parent_subparts_cpumask()
1501 cpumask_and(tmp->addmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1502 parent->cpus_allowed); in update_parent_subparts_cpumask()
1503 adding = cpumask_andnot(tmp->addmask, tmp->addmask, in update_parent_subparts_cpumask()
1504 parent->subparts_cpus); in update_parent_subparts_cpumask()
1506 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) || in update_parent_subparts_cpumask()
1508 cpumask_subset(parent->effective_cpus, tmp->addmask) && in update_parent_subparts_cpumask()
1515 parent->nr_subparts_cpus) in update_parent_subparts_cpumask()
1516 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1517 parent->subparts_cpus); in update_parent_subparts_cpumask()
1520 WRITE_ONCE(cs->prs_err, part_error); in update_parent_subparts_cpumask()
1527 switch (cs->partition_root_state) { in update_parent_subparts_cpumask()
1531 new_prs = -old_prs; in update_parent_subparts_cpumask()
1536 new_prs = -old_prs; in update_parent_subparts_cpumask()
1562 cpumask_or(parent->subparts_cpus, in update_parent_subparts_cpumask()
1563 parent->subparts_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1564 cpumask_andnot(parent->effective_cpus, in update_parent_subparts_cpumask()
1565 parent->effective_cpus, tmp->addmask); in update_parent_subparts_cpumask()
1568 cpumask_andnot(parent->subparts_cpus, in update_parent_subparts_cpumask()
1569 parent->subparts_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1573 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask); in update_parent_subparts_cpumask()
1574 cpumask_or(parent->effective_cpus, in update_parent_subparts_cpumask()
1575 parent->effective_cpus, tmp->delmask); in update_parent_subparts_cpumask()
1578 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus); in update_parent_subparts_cpumask()
1581 cs->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1586 update_tasks_cpumask(parent, tmp->addmask); in update_parent_subparts_cpumask()
1587 if (parent->child_ecpus_count) in update_parent_subparts_cpumask()
1613 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1638 compute_effective_cpumask(tmp->new_cpus, cp, parent); in update_cpumasks_hier()
1646 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) { in update_cpumasks_hier()
1648 cpumask_equal(cp->cpus_allowed, cp->subparts_cpus)) in update_cpumasks_hier()
1651 cpumask_copy(tmp->new_cpus, parent->effective_cpus); in update_cpumasks_hier()
1652 if (!cp->use_parent_ecpus) { in update_cpumasks_hier()
1653 cp->use_parent_ecpus = true; in update_cpumasks_hier()
1654 parent->child_ecpus_count++; in update_cpumasks_hier()
1656 } else if (cp->use_parent_ecpus) { in update_cpumasks_hier()
1657 cp->use_parent_ecpus = false; in update_cpumasks_hier()
1658 WARN_ON_ONCE(!parent->child_ecpus_count); in update_cpumasks_hier()
1659 parent->child_ecpus_count--; in update_cpumasks_hier()
1667 * 4) for v2 load balance state same as its parent. in update_cpumasks_hier()
1669 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) && in update_cpumasks_hier()
1670 cpumask_equal(tmp->new_cpus, cp->effective_cpus) && in update_cpumasks_hier()
1684 old_prs = new_prs = cp->partition_root_state; in update_cpumasks_hier()
1686 switch (parent->partition_root_state) { in update_cpumasks_hier()
1699 new_prs = -cp->partition_root_state; in update_cpumasks_hier()
1700 WRITE_ONCE(cp->prs_err, in update_cpumasks_hier()
1707 if (!css_tryget_online(&cp->css)) in update_cpumasks_hier()
1718 new_prs = cp->partition_root_state; in update_cpumasks_hier()
1723 if (cp->nr_subparts_cpus && !is_partition_valid(cp)) { in update_cpumasks_hier()
1727 cpumask_or(tmp->new_cpus, tmp->new_cpus, in update_cpumasks_hier()
1728 cp->subparts_cpus); in update_cpumasks_hier()
1729 cpumask_and(tmp->new_cpus, tmp->new_cpus, in update_cpumasks_hier()
1731 cp->nr_subparts_cpus = 0; in update_cpumasks_hier()
1732 cpumask_clear(cp->subparts_cpus); in update_cpumasks_hier()
1735 cpumask_copy(cp->effective_cpus, tmp->new_cpus); in update_cpumasks_hier()
1736 if (cp->nr_subparts_cpus) { in update_cpumasks_hier()
1741 cpumask_andnot(cp->effective_cpus, cp->effective_cpus, in update_cpumasks_hier()
1742 cp->subparts_cpus); in update_cpumasks_hier()
1745 cp->partition_root_state = new_prs; in update_cpumasks_hier()
1751 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); in update_cpumasks_hier()
1753 update_tasks_cpumask(cp, tmp->new_cpus); in update_cpumasks_hier()
1764 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
1766 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); in update_cpumasks_hier()
1770 * On legacy hierarchy, if the effective cpumask of any non- in update_cpumasks_hier()
1775 if (!cpumask_empty(cp->cpus_allowed) && in update_cpumasks_hier()
1782 css_put(&cp->css); in update_cpumasks_hier()
1791 * update_sibling_cpumasks - Update siblings cpumasks
1818 if (!sibling->use_parent_ecpus) in update_sibling_cpumasks()
1820 if (!css_tryget_online(&sibling->css)) in update_sibling_cpumasks()
1826 css_put(&sibling->css); in update_sibling_cpumasks()
1832 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1843 int old_prs = cs->partition_root_state; in update_cpumask()
1845 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ in update_cpumask()
1847 return -EACCES; in update_cpumask()
1856 cpumask_clear(trialcs->cpus_allowed); in update_cpumask()
1858 retval = cpulist_parse(buf, trialcs->cpus_allowed); in update_cpumask()
1862 if (!cpumask_subset(trialcs->cpus_allowed, in update_cpumask()
1864 return -EINVAL; in update_cpumask()
1868 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1872 return -ENOMEM; in update_cpumask()
1876 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { in update_cpumask()
1881 * The -EINVAL error code indicates that partition sibling in update_cpumask()
1892 cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) { in update_cpumask()
1903 if (cs->partition_root_state) { in update_cpumask()
1909 trialcs->cpus_allowed, &tmp); in update_cpumask()
1912 compute_effective_cpumask(trialcs->effective_cpus, trialcs, in update_cpumask()
1915 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1922 if (cs->nr_subparts_cpus) { in update_cpumask()
1924 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) && in update_cpumask()
1926 cs->nr_subparts_cpus = 0; in update_cpumask()
1927 cpumask_clear(cs->subparts_cpus); in update_cpumask()
1929 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1930 cs->cpus_allowed); in update_cpumask()
1931 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1939 if (cs->partition_root_state) { in update_cpumask()
1946 if (parent->child_ecpus_count) in update_cpumask()
1978 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); in cpuset_migrate_mm_workfn()
1979 mmput(mwork->mm); in cpuset_migrate_mm_workfn()
1995 mwork->mm = mm; in cpuset_migrate_mm()
1996 mwork->from = *from; in cpuset_migrate_mm()
1997 mwork->to = *to; in cpuset_migrate_mm()
1998 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); in cpuset_migrate_mm()
1999 queue_work(cpuset_migrate_mm_wq, &mwork->work); in cpuset_migrate_mm()
2011 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2015 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2026 write_seqcount_begin(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
2028 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); in cpuset_change_task_nodemask()
2030 tsk->mems_allowed = *newmems; in cpuset_change_task_nodemask()
2032 write_seqcount_end(&tsk->mems_allowed_seq); in cpuset_change_task_nodemask()
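A trivial standalone illustration of the OR-then-assign ordering above (hypothetical masks; the mems_allowed_seq seqcount itself is omitted): because the new nodes are OR-ed in before the final store, any intermediate value a racing reader might see is a superset of both old and new masks, never a nodemask that has dropped all of its allowed nodes.

	#include <stdio.h>

	int main(void)
	{
		unsigned long mems = 0x3;	/* old mems_allowed: nodes 0-1 */
		unsigned long newmems = 0xc;	/* new mems_allowed: nodes 2-3 */

		mems |= newmems;		/* step 1: 0xf, superset of old and new */
		printf("intermediate %#lx\n", mems);
		mems = newmems;			/* step 2: final value 0xc */
		printf("final        %#lx\n", mems);
		return 0;
	}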
2041 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2060 * take while holding tasklist_lock. Forks can happen - the in update_tasks_nodemask()
2068 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
2081 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
2083 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
2091 * cs->old_mems_allowed. in update_tasks_nodemask()
2093 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
2100 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2120 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); in update_nodemasks_hier()
2127 *new_mems = parent->effective_mems; in update_nodemasks_hier()
2130 if (nodes_equal(*new_mems, cp->effective_mems)) { in update_nodemasks_hier()
2135 if (!css_tryget_online(&cp->css)) in update_nodemasks_hier()
2140 cp->effective_mems = *new_mems; in update_nodemasks_hier()
2144 !nodes_equal(cp->mems_allowed, cp->effective_mems)); in update_nodemasks_hier()
2149 css_put(&cp->css); in update_nodemasks_hier()
2164 * lock each such task's mm->mmap_lock, scan its VMAs and rebind
2174 * it's read-only in update_nodemask()
2177 retval = -EACCES; in update_nodemask()
2188 nodes_clear(trialcs->mems_allowed); in update_nodemask()
2190 retval = nodelist_parse(buf, trialcs->mems_allowed); in update_nodemask()
2194 if (!nodes_subset(trialcs->mems_allowed, in update_nodemask()
2196 retval = -EINVAL; in update_nodemask()
2201 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2202 retval = 0; /* Too easy - nothing to do */ in update_nodemask()
2209 check_insane_mems_config(&trialcs->mems_allowed); in update_nodemask()
2212 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2215 /* use trialcs->mems_allowed as a temp variable */ in update_nodemask()
2216 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2235 if (val < -1 || val > sched_domain_level_max + 1) in update_relax_domain_level()
2236 return -EINVAL; in update_relax_domain_level()
2239 if (val != cs->relax_domain_level) { in update_relax_domain_level()
2240 cs->relax_domain_level = val; in update_relax_domain_level()
2241 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
2250 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2262 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
2269 * update_flag - read a 0 or a 1 in a file and update associated flag
2287 return -ENOMEM; in update_flag()
2290 set_bit(bit, &trialcs->flags); in update_flag()
2292 clear_bit(bit, &trialcs->flags); in update_flag()
2305 cs->flags = trialcs->flags; in update_flag()
2308 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) in update_flag()
2319 * update_prstate - update partition_root_state
2328 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2340 cs->partition_root_state = -new_prs; in update_prstate()
2345 return -ENOMEM; in update_prstate()
2355 if (cpumask_empty(cs->cpus_allowed)) { in update_prstate()
2378 if (unlikely(cs->nr_subparts_cpus)) { in update_prstate()
2380 cs->nr_subparts_cpus = 0; in update_prstate()
2381 cpumask_clear(cs->subparts_cpus); in update_prstate()
2382 compute_effective_cpumask(cs->effective_cpus, cs, parent); in update_prstate()
2392 new_prs = -new_prs; in update_prstate()
2397 cs->partition_root_state = new_prs; in update_prstate()
2398 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2405 if (!list_empty(&cs->css.children)) in update_prstate()
2417 * Frequency meter - How fast is some event occurring?
2421 * fmeter_init() - initialize a frequency meter.
2422 * fmeter_markevent() - called each time the event happens.
2423 * fmeter_getrate() - returns the recent rate of such events.
2424 * fmeter_update() - internal routine used to update fmeter.
2431 * The filter is single-pole low-pass recursive (IIR). The time unit
2432 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2436 * has a half-life of 10 seconds, meaning that if the events quit
2461 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
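A standalone check of the quoted 10-second half-life, assuming FM_SCALE is 1000 (the comment above says the arithmetic is scaled to simulate three decimal digits): each idle second multiplies the value by FM_COEF/FM_SCALE = 0.933, and 0.933^10 is roughly 0.5, so the same decay loop used by fmeter_update() halves the value in about ten seconds.

	#include <stdio.h>

	#define FM_COEF  933
	#define FM_SCALE 1000	/* assumed: three simulated decimal digits */

	int main(void)
	{
		int val = FM_SCALE;	/* start at 1.000 */

		for (int ticks = 1; ticks <= 10; ticks++) {
			val = (FM_COEF * val) / FM_SCALE;	/* the decay step in fmeter_update() */
			printf("after %2d idle sec: 0.%03d\n", ticks, val);
		}
		return 0;		/* last line prints 0.495, i.e. roughly half */
	}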
2469 fmp->cnt = 0; in fmeter_init()
2470 fmp->val = 0; in fmeter_init()
2471 fmp->time = 0; in fmeter_init()
2472 spin_lock_init(&fmp->lock); in fmeter_init()
2475 /* Internal meter update - process cnt events and update value */
2482 ticks = now - fmp->time; in fmeter_update()
2488 while (ticks-- > 0) in fmeter_update()
2489 fmp->val = (FM_COEF * fmp->val) / FM_SCALE; in fmeter_update()
2490 fmp->time = now; in fmeter_update()
2492 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE; in fmeter_update()
2493 fmp->cnt = 0; in fmeter_update()
2499 spin_lock(&fmp->lock); in fmeter_markevent()
2501 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE); in fmeter_markevent()
2502 spin_unlock(&fmp->lock); in fmeter_markevent()
2510 spin_lock(&fmp->lock); in fmeter_getrate()
2512 val = fmp->val; in fmeter_getrate()
2513 spin_unlock(&fmp->lock); in fmeter_getrate()
2527 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
2528 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
2529 return -ENOSPC; in cpuset_can_attach_check()
2535 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2536 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2560 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
2561 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
2581 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2582 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2586 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2589 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2590 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2594 ret = -EINVAL; in cpuset_can_attach()
2598 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
2610 cs->attach_in_progress++; in cpuset_can_attach()
2625 cs->attach_in_progress--; in cpuset_cancel_attach()
2626 if (!cs->attach_in_progress) in cpuset_cancel_attach()
2629 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
2630 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
2632 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
2655 cs->subparts_cpus); in cpuset_attach_task()
2680 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
2681 oldcs->effective_cpus); in cpuset_attach()
2682 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
2692 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2707 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2726 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, in cpuset_attach()
2734 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2736 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
2737 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
2738 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
2742 cs->attach_in_progress--; in cpuset_attach()
2743 if (!cs->attach_in_progress) in cpuset_attach()
2774 cpuset_filetype_t type = cft->private; in cpuset_write_u64()
2780 retval = -ENODEV; in cpuset_write_u64()
2810 retval = -EINVAL; in cpuset_write_u64()
2823 cpuset_filetype_t type = cft->private; in cpuset_write_s64()
2824 int retval = -ENODEV; in cpuset_write_s64()
2836 retval = -EINVAL; in cpuset_write_s64()
2853 int retval = -ENODEV; in cpuset_write_resmask()
2876 css_get(&cs->css); in cpuset_write_resmask()
2877 kernfs_break_active_protection(of->kn); in cpuset_write_resmask()
2887 retval = -ENOMEM; in cpuset_write_resmask()
2891 switch (of_cft(of)->private) { in cpuset_write_resmask()
2899 retval = -EINVAL; in cpuset_write_resmask()
2907 kernfs_unbreak_active_protection(of->kn); in cpuset_write_resmask()
2908 css_put(&cs->css); in cpuset_write_resmask()
2924 cpuset_filetype_t type = seq_cft(sf)->private; in cpuset_common_seq_show()
2931 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2934 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2937 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2940 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2943 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2946 ret = -EINVAL; in cpuset_common_seq_show()
2956 cpuset_filetype_t type = cft->private; in cpuset_read_u64()
2971 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2987 cpuset_filetype_t type = cft->private; in cpuset_read_s64()
2990 return cs->relax_domain_level; in cpuset_read_s64()
3004 switch (cs->partition_root_state) { in sched_partition_show()
3020 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
3035 int retval = -ENODEV; in sched_partition_write()
3049 return -EINVAL; in sched_partition_write()
3051 css_get(&cs->css); in sched_partition_write()
3061 css_put(&cs->css); in sched_partition_write()
3227 * cpuset_css_alloc - Allocate a cpuset css
3230 * Return: cpuset css on success, -ENOMEM on failure.
3232 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
3245 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3249 return ERR_PTR(-ENOMEM); in cpuset_css_alloc()
3252 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3253 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
3254 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
3255 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3256 cs->relax_domain_level = -1; in cpuset_css_alloc()
3260 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3262 return &cs->css; in cpuset_css_alloc()
3278 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3280 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3282 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3288 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3289 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3290 cs->use_parent_ecpus = true; in cpuset_css_online()
3291 parent->child_ecpus_count++; in cpuset_css_online()
3299 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3303 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) in cpuset_css_online()
3309 * historical reasons - the flag may be specified during mount. in cpuset_css_online()
3312 * refuse to clone the configuration - thereby refusing the task to in cpuset_css_online()
3316 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive in cpuset_css_online()
3329 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3330 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3331 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3332 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3365 if (cs->use_parent_ecpus) { in cpuset_css_offline()
3368 cs->use_parent_ecpus = false; in cpuset_css_offline()
3369 parent->child_ecpus_count--; in cpuset_css_offline()
3373 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3410 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork()
3441 cs->attach_in_progress++; in cpuset_can_fork()
3449 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork()
3460 cs->attach_in_progress--; in cpuset_cancel_fork()
3461 if (!cs->attach_in_progress) in cpuset_cancel_fork()
3485 set_cpus_allowed_ptr(task, current->cpus_ptr); in cpuset_fork()
3486 task->mems_allowed = current->mems_allowed; in cpuset_fork()
3495 cs->attach_in_progress--; in cpuset_fork()
3496 if (!cs->attach_in_progress) in cpuset_fork()
3522 * cpuset_init - initialize cpusets at system boot
3540 top_cpuset.relax_domain_level = -1; in cpuset_init()
3552 * cpuset to its next-highest non-empty parent.
3559 * Find its next-highest non-empty parent, (top cpuset in remove_tasks_in_empty_cpuset()
3563 while (cpumask_empty(parent->cpus_allowed) || in remove_tasks_in_empty_cpuset()
3564 nodes_empty(parent->mems_allowed)) in remove_tasks_in_empty_cpuset()
3567 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3569 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3582 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3583 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3584 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3585 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3592 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3594 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3597 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3598 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3619 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3621 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3624 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3625 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3642 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3658 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3666 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3673 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3675 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3680 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3682 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3691 if (cs->nr_subparts_cpus && is_partition_valid(cs) && in cpuset_hotplug_update_tasks()
3694 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3695 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3707 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus || in cpuset_hotplug_update_tasks()
3712 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3714 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3715 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3720 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3721 parent_prs = parent->partition_root_state; in cpuset_hotplug_update_tasks()
3727 WRITE_ONCE(cs->prs_err, PERR_INVPARENT); in cpuset_hotplug_update_tasks()
3729 WRITE_ONCE(cs->prs_err, PERR_NOTPART); in cpuset_hotplug_update_tasks()
3731 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG); in cpuset_hotplug_update_tasks()
3748 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3749 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3768 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3777 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3861 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3868 css_put(&cs->css); in cpuset_hotplug_workfn()
3910 * cpuset_init_smp - initialize cpus_allowed
3933 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3934 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3938 * attached to the specified @tsk. Guaranteed to return some non-empty
3976 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3980 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3981 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3996 cs_mask = task_cs(tsk)->cpus_allowed; in cpuset_cpus_allowed_fallback()
4004 * We own tsk->cpus_allowed, nobody can change it under us. in cpuset_cpus_allowed_fallback()
4006 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
4008 * the wrong tsk->cpus_allowed. However, both cases imply the in cpuset_cpus_allowed_fallback()
4009 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() in cpuset_cpus_allowed_fallback()
4013 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily in cpuset_cpus_allowed_fallback()
4025 nodes_setall(current->mems_allowed); in cpuset_init_current_mems_allowed()
4029 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4030 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4033 * attached to the specified @tsk. Guaranteed to return some non-empty
4053 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4056 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4060 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
4064 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4077 * cpuset_node_allowed - Can we allocate on a memory node?
4110 * in_interrupt - any node ok (current task context irrelevant)
4111 * GFP_ATOMIC - any node ok
4112 * tsk_is_oom_victim - any node ok
4113 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4114 * GFP_USER - only nodes in the current task's mems_allowed ok.
4124 if (node_isset(node, current->mems_allowed)) in cpuset_node_allowed()
4135 if (current->flags & PF_EXITING) /* Let dying task have memory */ in cpuset_node_allowed()
4143 allowed = node_isset(node, cs->mems_allowed); in cpuset_node_allowed()
4151 * cpuset_spread_node() - On which node to begin search for a page
4167 * only set nodes in task->mems_allowed that are online. So it
4178 return *rotor = next_node_in(*rotor, current->mems_allowed); in cpuset_spread_node()
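A user-space sketch of the rotor behaviour described above (a plain unsigned long stands in for nodemask_t and the helper name is made up): each call returns the next set bit after the previously returned position, wrapping around, so successive allocations spread evenly over the allowed nodes.

	#include <stdio.h>

	#define MAX_NODES (sizeof(unsigned long) * 8)

	static int next_node_in_sketch(int prev, unsigned long allowed)
	{
		for (unsigned int i = 1; i <= MAX_NODES; i++) {
			int node = (prev + i) % MAX_NODES;

			if (allowed & (1UL << node))
				return node;
		}
		return -1;			/* empty mask */
	}

	int main(void)
	{
		unsigned long mems_allowed = (1UL << 0) | (1UL << 2) | (1UL << 5);
		int rotor = 0;

		for (int i = 0; i < 6; i++) {
			rotor = next_node_in_sketch(rotor, mems_allowed);
			printf("%d ", rotor);	/* prints: 2 5 0 2 5 0 */
		}
		printf("\n");
		return 0;
	}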
4182 * cpuset_mem_spread_node() - On which node to begin search for a file page
4186 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) in cpuset_mem_spread_node()
4187 current->cpuset_mem_spread_rotor = in cpuset_mem_spread_node()
4188 node_random(&current->mems_allowed); in cpuset_mem_spread_node()
4190 return cpuset_spread_node(&current->cpuset_mem_spread_rotor); in cpuset_mem_spread_node()
4194 * cpuset_slab_spread_node() - On which node to begin search for a slab page
4198 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) in cpuset_slab_spread_node()
4199 current->cpuset_slab_spread_rotor = in cpuset_slab_spread_node()
4200 node_random(&current->mems_allowed); in cpuset_slab_spread_node()
4202 return cpuset_spread_node(&current->cpuset_slab_spread_rotor); in cpuset_slab_spread_node()
4207 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4220 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); in cpuset_mems_allowed_intersects()
4224 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4235 cgrp = task_cs(current)->css.cgroup; in cpuset_print_current_mems_allowed()
4239 nodemask_pr_args(&current->mems_allowed)); in cpuset_print_current_mems_allowed()
4253 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4264 * Display to user space in the per-cpuset read-only file
4273 fmeter_markevent(&task_cs(current)->fmeter); in __cpuset_memory_pressure_bump()
4280 * - Print task's cpuset path into seq_file.
4281 * - Used for /proc/<pid>/cpuset.
4282 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4283 * doesn't really matter if tsk->cpuset changes after we read it,
4294 retval = -ENOMEM; in proc_cpuset_show()
4302 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX, in proc_cpuset_show()
4303 current->nsproxy->cgroup_ns); in proc_cpuset_show()
4307 if (retval == -E2BIG) in proc_cpuset_show()
4308 retval = -ENAMETOOLONG; in proc_cpuset_show()
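A minimal user-space counterpart of the interface described above, reading back the path that proc_cpuset_show() emits (assuming only that /proc/self/cpuset exists, as the comment states):

	#include <stdio.h>

	int main(void)
	{
		char buf[4096];
		FILE *f = fopen("/proc/self/cpuset", "r");

		if (!f) {
			perror("/proc/self/cpuset");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("cpuset path: %s", buf);	/* e.g. "/" when unconfined */
		fclose(f);
		return 0;
	}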
4325 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()
4327 nodemask_pr_args(&task->mems_allowed)); in cpuset_task_status_allowed()