Lines matching full:cs (identifier cross-reference for cs in kernel/cgroup/cpuset.c)

229 static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs()  argument
231 return css_cs(cs->css.parent); in parent_cs()
236 struct cpuset *cs = task_cs(p); in inc_dl_tasks_cs() local
238 cs->nr_deadline_tasks++; in inc_dl_tasks_cs()
243 struct cpuset *cs = task_cs(p); in dec_dl_tasks_cs() local
245 cs->nr_deadline_tasks--; in dec_dl_tasks_cs()
261 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
266 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
271 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
276 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
278 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
281 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
286 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
291 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
293 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
296 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
298 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
301 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
303 return cs->partition_root_state > 0; in is_partition_valid()
306 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
308 return cs->partition_root_state < 0; in is_partition_invalid()
314 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
316 if (is_partition_valid(cs)) in make_partition_invalid()
317 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
323 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
325 if (old_prs == cs->partition_root_state) in notify_partition_change()
327 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
330 if (is_partition_valid(cs)) in notify_partition_change()
331 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
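The helpers above follow two small conventions: the per-cpuset boolean options live in a single flags word queried with test_bit(), and partition_root_state encodes validity in its sign (positive = valid partition root, negative = invalid), so make_partition_invalid() only has to flip the sign. A minimal self-contained sketch of both conventions (plain C with stand-in names, not the kernel's own definitions):

    #include <stdio.h>

    /* Stand-in flag bits, mirroring the CS_* naming style used above. */
    enum { CS_CPU_EXCLUSIVE = 0, CS_SCHED_LOAD_BALANCE = 1 };

    struct toy_cpuset {
        unsigned long flags;        /* one bit per boolean option */
        int partition_root_state;   /* >0 valid, <0 invalid, 0 member */
    };

    static int test_flag(const struct toy_cpuset *cs, int bit)
    {
        return (cs->flags >> bit) & 1UL;   /* stands in for test_bit() */
    }

    static void make_invalid(struct toy_cpuset *cs)
    {
        if (cs->partition_root_state > 0)  /* only valid roots flip */
            cs->partition_root_state = -cs->partition_root_state;
    }

    int main(void)
    {
        struct toy_cpuset cs = { .flags = 1UL << CS_SCHED_LOAD_BALANCE,
                                 .partition_root_state = 1 };

        printf("load_balance=%d exclusive=%d\n",
               test_flag(&cs, CS_SCHED_LOAD_BALANCE),
               test_flag(&cs, CS_CPU_EXCLUSIVE));

        make_invalid(&cs);
        printf("prs=%d (negative means the partition became invalid)\n",
               cs.partition_root_state);
        return 0;
    }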
458 * @cs: partition root to be checked
462 * It is assumed that @cs is a valid partition root. @excluded_child should
465 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
471 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
473 if (!excluded_child && !cs->nr_subparts_cpus) in partition_is_populated()
474 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
477 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
506 struct cpuset *cs; in guarantee_online_cpus() local
512 cs = task_cs(tsk); in guarantee_online_cpus()
514 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
515 cs = parent_cs(cs); in guarantee_online_cpus()
516 if (unlikely(!cs)) { in guarantee_online_cpus()
527 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
544 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
546 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
547 cs = parent_cs(cs); in guarantee_online_mems()
548 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
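guarantee_online_cpus() and guarantee_online_mems() share one idea: if the task's own cpuset has no usable CPUs or memory nodes, walk up through parent_cs() until an ancestor does, then intersect with the online set. A toy sketch of that walk over a parent-linked tree with plain bitmasks (illustrative only; the names are made up):

    #include <stdio.h>

    struct node {
        struct node *parent;
        unsigned long effective;   /* bitmask of resources this node may use */
    };

    /* Walk toward the root until the node's mask intersects 'online', then
     * return the intersection -- the pattern behind
     * guarantee_online_cpus()/guarantee_online_mems(). */
    static unsigned long guarantee_online(const struct node *n, unsigned long online)
    {
        while (n->parent && !(n->effective & online))
            n = n->parent;
        return n->effective & online;
    }

    int main(void)
    {
        struct node root  = { .parent = NULL,  .effective = 0xff };
        struct node child = { .parent = &root, .effective = 0x30 };

        /* Resources 4-5 went "offline": the child's mask no longer
         * intersects, so the walk falls back to the root's mask. */
        unsigned long online = 0x0f;

        printf("mask=0x%lx\n", guarantee_online(&child, online));   /* 0xf */
        return 0;
    }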
557 static void cpuset_update_task_spread_flags(struct cpuset *cs, in cpuset_update_task_spread_flags() argument
563 if (is_spread_page(cs)) in cpuset_update_task_spread_flags()
568 if (is_spread_slab(cs)) in cpuset_update_task_spread_flags()
592 * @cs: the cpuset that has cpumasks to be allocated.
598 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
602 if (cs) { in alloc_cpumasks()
603 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
604 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
605 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
632 * @cs: the cpuset whose cpumasks are to be freed.
635 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
637 if (cs) { in free_cpumasks()
638 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
639 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
640 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
651 * @cs: the cpuset that the trial cpuset duplicates
653 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
657 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
666 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
667 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
673 * @cs: the cpuset to be freed
675 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
677 free_cpumasks(cs, NULL); in free_cpuset()
678 kfree(cs); in free_cpuset()
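alloc_trial_cpuset() shows the validate-on-a-copy pattern used throughout this file: duplicate the cpuset with kmemdup(), give the copy its own cpumasks, apply the requested change to the copy, check it with validate_change(), and only then commit to the real cpuset; free_cpuset() releases the masks and the struct. A hedged userspace analogue of that duplicate/modify/validate flow (stand-in types, not the kernel API):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct toy_set {
        unsigned long allowed;     /* stands in for cpus_allowed */
        unsigned long effective;   /* stands in for effective_cpus */
    };

    static struct toy_set *alloc_trial(const struct toy_set *cs)
    {
        struct toy_set *trial = malloc(sizeof(*trial));
        if (trial)
            memcpy(trial, cs, sizeof(*cs));   /* like kmemdup() + mask copies */
        return trial;
    }

    /* Stand-in for validate_change(): reject an empty allowed mask. */
    static int validate_change(const struct toy_set *trial)
    {
        return trial->allowed ? 0 : -1;
    }

    int main(void)
    {
        struct toy_set cs = { .allowed = 0x0f, .effective = 0x0f };
        struct toy_set *trial = alloc_trial(&cs);

        if (!trial)
            return 1;

        trial->allowed = 0x03;                 /* proposed new mask */
        if (validate_change(trial) == 0)
            cs = *trial;                       /* commit only if valid */

        printf("allowed=0x%lx\n", cs.allowed); /* 0x3 */
        free(trial);                           /* free_cpuset() analogue */
        return 0;
    }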
1070 static void dl_update_tasks_root_domain(struct cpuset *cs) in dl_update_tasks_root_domain() argument
1075 if (cs->nr_deadline_tasks == 0) in dl_update_tasks_root_domain()
1078 css_task_iter_start(&cs->css, 0, &it); in dl_update_tasks_root_domain()
1088 struct cpuset *cs = NULL; in dl_rebuild_rd_accounting() local
1103 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in dl_rebuild_rd_accounting()
1105 if (cpumask_empty(cs->effective_cpus)) { in dl_rebuild_rd_accounting()
1110 css_get(&cs->css); in dl_rebuild_rd_accounting()
1114 dl_update_tasks_root_domain(cs); in dl_rebuild_rd_accounting()
1117 css_put(&cs->css); in dl_rebuild_rd_accounting()
1148 struct cpuset *cs; in rebuild_sched_domains_locked() local
1174 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1175 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1179 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1211 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1214 * Iterate through each task of @cs updating its cpus_allowed to the
1220 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) in update_tasks_cpumask() argument
1224 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1226 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1236 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus); in update_tasks_cpumask()
1238 cpumask_and(new_cpus, possible_mask, cs->effective_cpus); in update_tasks_cpumask()
1248 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1257 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1259 if (parent->nr_subparts_cpus && is_partition_valid(cs)) { in compute_effective_cpumask()
1262 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1265 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
1279 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1281 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1289 static int update_partition_exclusive(struct cpuset *cs, int new_prs) in update_partition_exclusive() argument
1293 if (exclusive && !is_cpu_exclusive(cs)) { in update_partition_exclusive()
1294 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1)) in update_partition_exclusive()
1296 } else if (!exclusive && is_cpu_exclusive(cs)) { in update_partition_exclusive()
1298 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_partition_exclusive()
1310 static void update_partition_sd_lb(struct cpuset *cs, int old_prs) in update_partition_sd_lb() argument
1312 int new_prs = cs->partition_root_state; in update_partition_sd_lb()
1317 * If cs is not a valid partition root, the load balance state in update_partition_sd_lb()
1323 new_lb = is_sched_load_balance(parent_cs(cs)); in update_partition_sd_lb()
1325 if (new_lb != !!is_sched_load_balance(cs)) { in update_partition_sd_lb()
1328 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1330 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in update_partition_sd_lb()
1339 * @cs: The cpuset that requests change in partition root state
1375 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, in update_parent_subparts_cpumask() argument
1379 struct cpuset *parent = parent_cs(cs); in update_parent_subparts_cpumask()
1396 if (!newmask && cpumask_empty(cs->cpus_allowed)) in update_parent_subparts_cpumask()
1404 old_prs = new_prs = cs->partition_root_state; in update_parent_subparts_cpumask()
1410 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed)) in update_parent_subparts_cpumask()
1417 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) && in update_parent_subparts_cpumask()
1418 partition_is_populated(parent, cs)) in update_parent_subparts_cpumask()
1421 cpumask_copy(tmp->addmask, cs->cpus_allowed); in update_parent_subparts_cpumask()
1429 cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1439 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1455 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1474 partition_is_populated(parent, cs)) { in update_parent_subparts_cpumask()
1477 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1499 cpumask_and(tmp->addmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1504 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) || in update_parent_subparts_cpumask()
1507 partition_is_populated(parent, cs))) { in update_parent_subparts_cpumask()
1512 if (part_error && is_partition_valid(cs) && in update_parent_subparts_cpumask()
1514 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1518 WRITE_ONCE(cs->prs_err, part_error); in update_parent_subparts_cpumask()
1525 switch (cs->partition_root_state) { in update_parent_subparts_cpumask()
1547 int err = update_partition_exclusive(cs, new_prs); in update_parent_subparts_cpumask()
1579 cs->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1586 update_sibling_cpumasks(parent, cs, tmp); in update_parent_subparts_cpumask()
1596 update_partition_sd_lb(cs, old_prs); in update_parent_subparts_cpumask()
1600 notify_partition_change(cs, old_prs); in update_parent_subparts_cpumask()
1612 * @cs: the cpuset to consider
1623 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
1632 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1678 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1683 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
1791 * @cs: Current cpuset
1794 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1814 if (sibling == cs) in update_sibling_cpumasks()
1831 * @cs: the cpuset to consider
1835 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1841 int old_prs = cs->partition_root_state; in update_cpumask()
1844 if (cs == &top_cpuset) in update_cpumask()
1866 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1872 retval = validate_change(cs, trialcs); in update_cpumask()
1887 parent = parent_cs(cs); in update_cpumask()
1901 if (cs->partition_root_state) { in update_cpumask()
1903 update_parent_subparts_cpumask(cs, partcmd_invalidate, in update_cpumask()
1906 update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1911 parent_cs(cs)); in update_cpumask()
1913 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1920 if (cs->nr_subparts_cpus) { in update_cpumask()
1921 if (!is_partition_valid(cs) || in update_cpumask()
1922 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) && in update_cpumask()
1923 partition_is_populated(cs, NULL))) { in update_cpumask()
1924 cs->nr_subparts_cpus = 0; in update_cpumask()
1925 cpumask_clear(cs->subparts_cpus); in update_cpumask()
1927 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1928 cs->cpus_allowed); in update_cpumask()
1929 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1935 update_cpumasks_hier(cs, &tmp, 0); in update_cpumask()
1937 if (cs->partition_root_state) { in update_cpumask()
1938 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1945 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1948 update_partition_sd_lb(cs, old_prs); in update_cpumask()
2040 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2042 * Iterate through each task of @cs updating its mems_allowed to the
2046 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
2052 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
2054 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
2066 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
2077 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
2079 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
2081 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
2089 * cs->old_mems_allowed. in update_tasks_nodemask()
2091 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
2099 * @cs: the cpuset to consider
2109 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2115 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2161 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2165 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2174 if (cs == &top_cpuset) { in update_nodemask()
2199 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2203 retval = validate_change(cs, trialcs); in update_nodemask()
2210 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2214 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2230 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
2237 if (val != cs->relax_domain_level) { in update_relax_domain_level()
2238 cs->relax_domain_level = val; in update_relax_domain_level()
2239 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
2240 is_sched_load_balance(cs)) in update_relax_domain_level()
2249 * @cs: the cpuset in which each task's spread flags need to be changed
2251 * Iterate through each task of @cs updating its spread flags. As this
2255 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
2260 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
2262 cpuset_update_task_spread_flags(cs, task); in update_tasks_flags()
2269 * @cs: the cpuset to update in update_flag()
2275 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
2283 trialcs = alloc_trial_cpuset(cs); in update_flag()
2292 err = validate_change(cs, trialcs); in update_flag()
2296 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
2299 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
2300 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
2303 cs->flags = trialcs->flags; in update_flag()
2310 update_tasks_flags(cs); in update_flag()
2318 * @cs: the cpuset to update
2324 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2326 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2327 struct cpuset *parent = parent_cs(cs); in update_prstate()
2338 cs->partition_root_state = -new_prs; in update_prstate()
2345 err = update_partition_exclusive(cs, new_prs); in update_prstate()
2353 if (cpumask_empty(cs->cpus_allowed)) { in update_prstate()
2358 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2370 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, in update_prstate()
2376 if (unlikely(cs->nr_subparts_cpus)) { in update_prstate()
2378 cs->nr_subparts_cpus = 0; in update_prstate()
2379 cpumask_clear(cs->subparts_cpus); in update_prstate()
2380 compute_effective_cpumask(cs->effective_cpus, cs, parent); in update_prstate()
2391 update_partition_exclusive(cs, new_prs); in update_prstate()
2395 cs->partition_root_state = new_prs; in update_prstate()
2396 WRITE_ONCE(cs->prs_err, err); in update_prstate()
2403 if (!list_empty(&cs->css.children)) in update_prstate()
2404 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); in update_prstate()
2407 update_partition_sd_lb(cs, old_prs); in update_prstate()
2409 notify_partition_change(cs, old_prs); in update_prstate()
2523 static int cpuset_can_attach_check(struct cpuset *cs) in cpuset_can_attach_check() argument
2525 if (cpumask_empty(cs->effective_cpus) || in cpuset_can_attach_check()
2526 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) in cpuset_can_attach_check()
2531 static void reset_migrate_dl_data(struct cpuset *cs) in reset_migrate_dl_data() argument
2533 cs->nr_migrate_dl_tasks = 0; in reset_migrate_dl_data()
2534 cs->sum_migrate_dl_bw = 0; in reset_migrate_dl_data()
2541 struct cpuset *cs, *oldcs; in cpuset_can_attach() local
2549 cs = css_cs(css); in cpuset_can_attach()
2554 ret = cpuset_can_attach_check(cs); in cpuset_can_attach()
2558 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); in cpuset_can_attach()
2559 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_can_attach()
2579 cs->nr_migrate_dl_tasks++; in cpuset_can_attach()
2580 cs->sum_migrate_dl_bw += task->dl.dl_bw; in cpuset_can_attach()
2584 if (!cs->nr_migrate_dl_tasks) in cpuset_can_attach()
2587 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { in cpuset_can_attach()
2588 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); in cpuset_can_attach()
2591 reset_migrate_dl_data(cs); in cpuset_can_attach()
2596 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); in cpuset_can_attach()
2598 reset_migrate_dl_data(cs); in cpuset_can_attach()
2608 cs->attach_in_progress++; in cpuset_can_attach()
2617 struct cpuset *cs; in cpuset_cancel_attach() local
2620 cs = css_cs(css); in cpuset_cancel_attach()
2623 cs->attach_in_progress--; in cpuset_cancel_attach()
2624 if (!cs->attach_in_progress) in cpuset_cancel_attach()
2627 if (cs->nr_migrate_dl_tasks) { in cpuset_cancel_attach()
2628 int cpu = cpumask_any(cs->effective_cpus); in cpuset_cancel_attach()
2630 dl_bw_free(cpu, cs->sum_migrate_dl_bw); in cpuset_cancel_attach()
2631 reset_migrate_dl_data(cs); in cpuset_cancel_attach()
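cpuset_can_attach(), cpuset_cancel_attach() and cpuset_attach() together implement a reserve/commit/rollback protocol for SCHED_DEADLINE bandwidth: can_attach sums the bandwidth of the deadline tasks being moved (sum_migrate_dl_bw) and reserves it with dl_bw_alloc(), cancel_attach returns it with dl_bw_free(), and attach folds nr_migrate_dl_tasks into nr_deadline_tasks before clearing the scratch counters. A generic sketch of that two-phase pattern (toy reservation functions, not the scheduler's API):

    #include <stdio.h>

    /* Toy capacity pool standing in for per-root-domain deadline bandwidth. */
    static long pool_free = 100;

    static int bw_alloc(long amount)            /* dl_bw_alloc() stand-in */
    {
        if (amount > pool_free)
            return -1;
        pool_free -= amount;
        return 0;
    }

    static void bw_free(long amount)            /* dl_bw_free() stand-in */
    {
        pool_free += amount;
    }

    struct migration {
        long sum_bw;      /* like cs->sum_migrate_dl_bw */
        int nr_tasks;     /* like cs->nr_migrate_dl_tasks */
    };

    static int can_attach(struct migration *m, const long *task_bw, int n)
    {
        for (int i = 0; i < n; i++) {
            m->sum_bw += task_bw[i];
            m->nr_tasks++;
        }
        return bw_alloc(m->sum_bw);             /* reserve up front */
    }

    static void cancel_attach(struct migration *m)
    {
        bw_free(m->sum_bw);                     /* roll the reservation back */
        m->sum_bw = 0;
        m->nr_tasks = 0;                        /* reset_migrate_dl_data() analogue */
    }

    int main(void)
    {
        long bws[] = { 30, 40 };
        struct migration m = { 0, 0 };

        if (can_attach(&m, bws, 2) == 0)
            printf("reserved %ld, pool now %ld\n", m.sum_bw, pool_free);

        cancel_attach(&m);                      /* pretend the attach failed */
        printf("pool restored to %ld\n", pool_free);
        return 0;
    }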
2645 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) in cpuset_attach_task() argument
2649 if (cs != &top_cpuset) in cpuset_attach_task()
2653 cs->subparts_cpus); in cpuset_attach_task()
2661 cpuset_update_task_spread_flags(cs, task); in cpuset_attach_task()
2669 struct cpuset *cs; in cpuset_attach() local
2674 cs = css_cs(css); in cpuset_attach()
2678 cpus_updated = !cpumask_equal(cs->effective_cpus, in cpuset_attach()
2680 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); in cpuset_attach()
2690 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2694 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2697 cpuset_attach_task(cs, task); in cpuset_attach()
2705 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2706 if (!is_memory_migrate(cs) && !mems_updated) in cpuset_attach()
2723 if (is_memory_migrate(cs)) in cpuset_attach()
2732 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2734 if (cs->nr_migrate_dl_tasks) { in cpuset_attach()
2735 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; in cpuset_attach()
2736 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; in cpuset_attach()
2737 reset_migrate_dl_data(cs); in cpuset_attach()
2740 cs->attach_in_progress--; in cpuset_attach()
2741 if (!cs->attach_in_progress) in cpuset_attach()
2771 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2777 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2784 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2787 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2790 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2793 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2796 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2802 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2805 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2820 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2826 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2831 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2849 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2856 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2861 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2870 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2874 css_get(&cs->css); in cpuset_write_resmask()
2880 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2883 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2891 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2894 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2906 css_put(&cs->css); in cpuset_write_resmask()
2921 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2929 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2932 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2935 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2938 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2941 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
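cpuset_common_seq_show() prints every mask with the kernel's "%*pbl" bitmap-list format, which renders set bits as comma-separated ranges (a mask with bits 0-3 and 8 set prints as "0-3,8"), the same format written back through the cpus and mems files. A small sketch of that range formatting, assuming nothing beyond standard C:

    #include <stdio.h>

    /* Print a 64-bit mask in the "0-3,8" range-list style that the kernel's
     * "%*pbl" format produces (illustration only, not the kernel helper). */
    static void print_bitmask_list(unsigned long long mask)
    {
        int bit = 0, first = 1;

        while (bit < 64) {
            if (!((mask >> bit) & 1ULL)) {
                bit++;
                continue;
            }
            int start = bit;
            while (bit < 64 && ((mask >> bit) & 1ULL))
                bit++;
            printf("%s%d", first ? "" : ",", start);
            if (bit - 1 > start)
                printf("-%d", bit - 1);
            first = 0;
        }
        printf("\n");
    }

    int main(void)
    {
        print_bitmask_list(0x10f);   /* bits 0-3 and 8 -> prints "0-3,8" */
        return 0;
    }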
2953 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2957 return is_cpu_exclusive(cs); in cpuset_read_u64()
2959 return is_mem_exclusive(cs); in cpuset_read_u64()
2961 return is_mem_hardwall(cs); in cpuset_read_u64()
2963 return is_sched_load_balance(cs); in cpuset_read_u64()
2965 return is_memory_migrate(cs); in cpuset_read_u64()
2969 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2971 return is_spread_page(cs); in cpuset_read_u64()
2973 return is_spread_slab(cs); in cpuset_read_u64()
2984 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2988 return cs->relax_domain_level; in cpuset_read_s64()
2999 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
3002 switch (cs->partition_root_state) { in sched_partition_show()
3018 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
3031 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
3049 css_get(&cs->css); in sched_partition_write()
3052 if (!is_cpuset_online(cs)) in sched_partition_write()
3055 retval = update_prstate(cs, val); in sched_partition_write()
3059 css_put(&cs->css); in sched_partition_write()
3236 struct cpuset *cs; in cpuset_css_alloc() local
3241 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3242 if (!cs) in cpuset_css_alloc()
3245 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3246 kfree(cs); in cpuset_css_alloc()
3250 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3251 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
3252 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
3253 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3254 cs->relax_domain_level = -1; in cpuset_css_alloc()
3258 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3260 return &cs->css; in cpuset_css_alloc()
3265 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3266 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3276 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3278 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3280 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3286 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3287 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3288 cs->use_parent_ecpus = true; in cpuset_css_online()
3297 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_online()
3327 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3328 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3329 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3330 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3351 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3356 if (is_partition_valid(cs)) in cpuset_css_offline()
3357 update_prstate(cs, 0); in cpuset_css_offline()
3360 is_sched_load_balance(cs)) in cpuset_css_offline()
3361 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3363 if (cs->use_parent_ecpus) { in cpuset_css_offline()
3364 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
3366 cs->use_parent_ecpus = false; in cpuset_css_offline()
3371 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3379 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3381 free_cpuset(cs); in cpuset_css_free()
3408 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_can_fork() local
3413 same_cs = (cs == task_cs(current)); in cpuset_can_fork()
3423 ret = cpuset_can_attach_check(cs); in cpuset_can_fork()
3439 cs->attach_in_progress++; in cpuset_can_fork()
3447 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); in cpuset_cancel_fork() local
3451 same_cs = (cs == task_cs(current)); in cpuset_cancel_fork()
3458 cs->attach_in_progress--; in cpuset_cancel_fork()
3459 if (!cs->attach_in_progress) in cpuset_cancel_fork()
3471 struct cpuset *cs; in cpuset_fork() local
3475 cs = task_cs(task); in cpuset_fork()
3476 same_cs = (cs == task_cs(current)); in cpuset_fork()
3480 if (cs == &top_cpuset) in cpuset_fork()
3490 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_fork()
3491 cpuset_attach_task(cs, task); in cpuset_fork()
3493 cs->attach_in_progress--; in cpuset_fork()
3494 if (!cs->attach_in_progress) in cpuset_fork()
3552 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3560 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3565 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3567 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3573 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3580 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3581 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3582 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3583 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3590 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3591 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks_legacy()
3592 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3593 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3595 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3596 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3605 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3611 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3616 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3617 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3619 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3622 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3623 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3627 update_tasks_cpumask(cs, new_cpus); in hotplug_update_tasks()
3629 update_tasks_nodemask(cs); in hotplug_update_tasks()
3641 * @cs: cpuset of interest
3644 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3645 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3648 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3656 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3664 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3669 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3670 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3671 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3673 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3678 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3680 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3689 if (cs->nr_subparts_cpus && is_partition_valid(cs) && in cpuset_hotplug_update_tasks()
3690 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3692 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3693 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3695 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3705 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus || in cpuset_hotplug_update_tasks()
3706 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) { in cpuset_hotplug_update_tasks()
3709 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); in cpuset_hotplug_update_tasks()
3710 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3712 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3713 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3715 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3718 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3720 if (is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
3722 make_partition_invalid(cs); in cpuset_hotplug_update_tasks()
3725 WRITE_ONCE(cs->prs_err, PERR_INVPARENT); in cpuset_hotplug_update_tasks()
3727 WRITE_ONCE(cs->prs_err, PERR_NOTPART); in cpuset_hotplug_update_tasks()
3729 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG); in cpuset_hotplug_update_tasks()
3730 notify_partition_change(cs, old_prs); in cpuset_hotplug_update_tasks()
3739 else if (is_partition_valid(parent) && is_partition_invalid(cs)) { in cpuset_hotplug_update_tasks()
3740 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp); in cpuset_hotplug_update_tasks()
3741 if (is_partition_valid(cs)) in cpuset_hotplug_update_tasks()
3746 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3747 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3755 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3758 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3854 struct cpuset *cs; in cpuset_hotplug_workfn() local
3858 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3859 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3863 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3866 css_put(&cs->css); in cpuset_hotplug_workfn()
3944 struct cpuset *cs; in cpuset_cpus_allowed() local
3949 cs = task_cs(tsk); in cpuset_cpus_allowed()
3950 if (cs != &top_cpuset) in cpuset_cpus_allowed()
3957 if ((cs == &top_cpuset) || cpumask_empty(pmask)) { in cpuset_cpus_allowed()
4004 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
4067 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
4069 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
4070 cs = parent_cs(cs); in nearest_hardwall_ancestor()
4071 return cs; in nearest_hardwall_ancestor()
4116 struct cpuset *cs; /* current cpuset ancestors */ in cpuset_node_allowed() local
4140 cs = nearest_hardwall_ancestor(task_cs(current)); in cpuset_node_allowed()
4141 allowed = node_isset(node, cs->mems_allowed); in cpuset_node_allowed()