1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24 #include "cgroup-internal.h"
25
26 #include <linux/cpu.h>
27 #include <linux/cpumask.h>
28 #include <linux/cpuset.h>
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/kernel.h>
32 #include <linux/mempolicy.h>
33 #include <linux/mm.h>
34 #include <linux/memory.h>
35 #include <linux/export.h>
36 #include <linux/rcupdate.h>
37 #include <linux/sched.h>
38 #include <linux/sched/deadline.h>
39 #include <linux/sched/mm.h>
40 #include <linux/sched/task.h>
41 #include <linux/security.h>
42 #include <linux/spinlock.h>
43 #include <linux/oom.h>
44 #include <linux/sched/isolation.h>
45 #include <linux/cgroup.h>
46 #include <linux/wait.h>
47
48 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
49 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
50
51 /*
 52 * There could be abnormal cpuset configurations for cpu or memory
 53 * node binding; this key provides a quick, low-cost check for
 54 * that situation.
55 */
56 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
57
58 /* See "Frequency meter" comments, below. */
59
60 struct fmeter {
61 int cnt; /* unprocessed events count */
62 int val; /* most recent output value */
63 time64_t time; /* clock (secs) when val computed */
64 spinlock_t lock; /* guards read or write of above */
65 };
66
67 /*
68 * Invalid partition error code
69 */
70 enum prs_errcode {
71 PERR_NONE = 0,
72 PERR_INVCPUS,
73 PERR_INVPARENT,
74 PERR_NOTPART,
75 PERR_NOTEXCL,
76 PERR_NOCPUS,
77 PERR_HOTPLUG,
78 PERR_CPUSEMPTY,
79 };
80
81 static const char * const perr_strings[] = {
82 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus",
83 [PERR_INVPARENT] = "Parent is an invalid partition root",
84 [PERR_NOTPART] = "Parent is not a partition root",
85 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
86 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
87 [PERR_HOTPLUG] = "No cpu available due to hotplug",
88 [PERR_CPUSEMPTY] = "cpuset.cpus is empty",
89 };
90
91 struct cpuset {
92 struct cgroup_subsys_state css;
93
94 unsigned long flags; /* "unsigned long" so bitops work */
95
96 /*
97 * On default hierarchy:
98 *
99 * The user-configured masks can only be changed by writing to
100 * cpuset.cpus and cpuset.mems, and won't be limited by the
101 * parent masks.
102 *
103 * The effective masks are the real masks that apply to the tasks
104 * in the cpuset. They may be changed if the configured masks are
105 * changed or hotplug happens.
106 *
107 * effective_mask == configured_mask & parent's effective_mask,
108 * and if it ends up empty, it will inherit the parent's mask.
109 *
110 *
111 * On legacy hierarchy:
112 *
113 * The user-configured masks are always the same as the effective masks.
114 */
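	/*
	 * Illustrative example (not part of the original comment): if the
	 * parent's effective_cpus is 0-3 and this cpuset's cpus_allowed is
	 * 2-5, its effective_cpus becomes 2-3.  If cpus_allowed were 6-7
	 * (no overlap at all), the result would be empty and this cpuset
	 * would inherit the parent's effective_cpus 0-3 instead.
	 */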
115
116	/* user-configured CPUs and Memory Nodes allowed to tasks */
117 cpumask_var_t cpus_allowed;
118 nodemask_t mems_allowed;
119
120	/* effective CPUs and Memory Nodes allowed to tasks */
121 cpumask_var_t effective_cpus;
122 nodemask_t effective_mems;
123
124 /*
125 * CPUs allocated to child sub-partitions (default hierarchy only)
126 * - CPUs granted by the parent = effective_cpus U subparts_cpus
127 * - effective_cpus and subparts_cpus are mutually exclusive.
128 *
129 * effective_cpus contains only onlined CPUs, but subparts_cpus
130 * may have offlined ones.
131 */
132 cpumask_var_t subparts_cpus;
133
134 /*
135	 * These are the old Memory Nodes that tasks in this cpuset took on.
136 *
137 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
138 * - A new cpuset's old_mems_allowed is initialized when some
139 * task is moved into it.
140 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
141 * cpuset.mems_allowed and have tasks' nodemask updated, and
142 * then old_mems_allowed is updated to mems_allowed.
143 */
144 nodemask_t old_mems_allowed;
145
146 struct fmeter fmeter; /* memory_pressure filter */
147
148 /*
149 * Tasks are being attached to this cpuset. Used to prevent
150 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
151 */
152 int attach_in_progress;
153
154 /* partition number for rebuild_sched_domains() */
155 int pn;
156
157 /* for custom sched domain */
158 int relax_domain_level;
159
160 /* number of CPUs in subparts_cpus */
161 int nr_subparts_cpus;
162
163 /* partition root state */
164 int partition_root_state;
165
166 /*
167 * Default hierarchy only:
168 * use_parent_ecpus - set if using parent's effective_cpus
169 * child_ecpus_count - # of children with use_parent_ecpus set
170 */
171 int use_parent_ecpus;
172 int child_ecpus_count;
173
174 /*
175 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
176 * know when to rebuild associated root domain bandwidth information.
177 */
178 int nr_deadline_tasks;
179 int nr_migrate_dl_tasks;
180 u64 sum_migrate_dl_bw;
181
182 /* Invalid partition error code, not lock protected */
183 enum prs_errcode prs_err;
184
185 /* Handle for cpuset.cpus.partition */
186 struct cgroup_file partition_file;
187 };
188
189 /*
190 * Partition root states:
191 *
192 * 0 - member (not a partition root)
193 * 1 - partition root
194 * 2 - partition root without load balancing (isolated)
195 * -1 - invalid partition root
196 * -2 - invalid isolated partition root
197 */
198 #define PRS_MEMBER 0
199 #define PRS_ROOT 1
200 #define PRS_ISOLATED 2
201 #define PRS_INVALID_ROOT -1
202 #define PRS_INVALID_ISOLATED -2
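/*
 * Note that an invalid state is the negation of the corresponding valid
 * state (PRS_INVALID_ROOT == -PRS_ROOT, PRS_INVALID_ISOLATED ==
 * -PRS_ISOLATED), which is why make_partition_invalid() below can simply
 * flip the sign of partition_root_state.
 */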
203
204 static inline bool is_prs_invalid(int prs_state)
205 {
206 return prs_state < 0;
207 }
208
209 /*
210 * Temporary cpumasks for working with partitions that are passed among
211 * functions to avoid memory allocation in inner functions.
212 */
213 struct tmpmasks {
214 cpumask_var_t addmask, delmask; /* For partition root */
215 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
216 };
217
218 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
219 {
220 return css ? container_of(css, struct cpuset, css) : NULL;
221 }
222
223 /* Retrieve the cpuset for a task */
224 static inline struct cpuset *task_cs(struct task_struct *task)
225 {
226 return css_cs(task_css(task, cpuset_cgrp_id));
227 }
228
229 static inline struct cpuset *parent_cs(struct cpuset *cs)
230 {
231 return css_cs(cs->css.parent);
232 }
233
234 void inc_dl_tasks_cs(struct task_struct *p)
235 {
236 struct cpuset *cs = task_cs(p);
237
238 cs->nr_deadline_tasks++;
239 }
240
241 void dec_dl_tasks_cs(struct task_struct *p)
242 {
243 struct cpuset *cs = task_cs(p);
244
245 cs->nr_deadline_tasks--;
246 }
247
248 /* bits in struct cpuset flags field */
249 typedef enum {
250 CS_ONLINE,
251 CS_CPU_EXCLUSIVE,
252 CS_MEM_EXCLUSIVE,
253 CS_MEM_HARDWALL,
254 CS_MEMORY_MIGRATE,
255 CS_SCHED_LOAD_BALANCE,
256 CS_SPREAD_PAGE,
257 CS_SPREAD_SLAB,
258 } cpuset_flagbits_t;
259
260 /* convenient tests for these bits */
261 static inline bool is_cpuset_online(struct cpuset *cs)
262 {
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
264 }
265
266 static inline int is_cpu_exclusive(const struct cpuset *cs)
267 {
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
269 }
270
271 static inline int is_mem_exclusive(const struct cpuset *cs)
272 {
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
274 }
275
276 static inline int is_mem_hardwall(const struct cpuset *cs)
277 {
278 return test_bit(CS_MEM_HARDWALL, &cs->flags);
279 }
280
281 static inline int is_sched_load_balance(const struct cpuset *cs)
282 {
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
284 }
285
286 static inline int is_memory_migrate(const struct cpuset *cs)
287 {
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
289 }
290
291 static inline int is_spread_page(const struct cpuset *cs)
292 {
293 return test_bit(CS_SPREAD_PAGE, &cs->flags);
294 }
295
296 static inline int is_spread_slab(const struct cpuset *cs)
297 {
298 return test_bit(CS_SPREAD_SLAB, &cs->flags);
299 }
300
301 static inline int is_partition_valid(const struct cpuset *cs)
302 {
303 return cs->partition_root_state > 0;
304 }
305
306 static inline int is_partition_invalid(const struct cpuset *cs)
307 {
308 return cs->partition_root_state < 0;
309 }
310
311 /*
312 * Callers should hold callback_lock to modify partition_root_state.
313 */
314 static inline void make_partition_invalid(struct cpuset *cs)
315 {
316 if (is_partition_valid(cs))
317 cs->partition_root_state = -cs->partition_root_state;
318 }
319
320 /*
321 * Send a notification event whenever partition_root_state changes.
322 */
323 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
324 {
325 if (old_prs == cs->partition_root_state)
326 return;
327 cgroup_file_notify(&cs->partition_file);
328
329 /* Reset prs_err if not invalid */
330 if (is_partition_valid(cs))
331 WRITE_ONCE(cs->prs_err, PERR_NONE);
332 }
333
334 static struct cpuset top_cpuset = {
335 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
336 (1 << CS_MEM_EXCLUSIVE)),
337 .partition_root_state = PRS_ROOT,
338 };
339
340 /**
341 * cpuset_for_each_child - traverse online children of a cpuset
342 * @child_cs: loop cursor pointing to the current child
343 * @pos_css: used for iteration
344 * @parent_cs: target cpuset to walk children of
345 *
346 * Walk @child_cs through the online children of @parent_cs. Must be used
347 * with RCU read locked.
348 */
349 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
350 css_for_each_child((pos_css), &(parent_cs)->css) \
351 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
352
353 /**
354 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
355 * @des_cs: loop cursor pointing to the current descendant
356 * @pos_css: used for iteration
357 * @root_cs: target cpuset to walk descendants of
358 *
359 * Walk @des_cs through the online descendants of @root_cs. Must be used
360 * with RCU read locked. The caller may modify @pos_css by calling
361 * css_rightmost_descendant() to skip subtree. @root_cs is included in the
362 * iteration and the first node to be visited.
363 */
364 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
365 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
366 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
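/*
 * Typical usage of the iterators above (illustrative sketch;
 * interested_in() is a placeholder predicate - see
 * update_domain_attr_tree() and rebuild_sched_domains_locked() below
 * for real callers):
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 *		if (!interested_in(cp)) {
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;		(skip cp's whole subtree)
 *		}
 *		... examine cp while still under rcu_read_lock() ...
 *	}
 *	rcu_read_unlock();
 */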
367
368 /*
369 * There are two global locks guarding cpuset structures - cpuset_mutex and
370 * callback_lock. We also require taking task_lock() when dereferencing a
371 * task's cpuset pointer. See "The task_lock() exception", at the end of this
372 * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
373 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
374 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
375 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
376 * correctness.
377 *
378 * A task must hold both locks to modify cpusets. If a task holds
379 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
380 * also acquire callback_lock and be able to modify cpusets. It can perform
381 * various checks on the cpuset structure first, knowing nothing will change.
382 * It can also allocate memory while just holding cpuset_mutex. While it is
383 * performing these checks, various callback routines can briefly acquire
384 * callback_lock to query cpusets. Once it is ready to make the changes, it
385 * takes callback_lock, blocking everyone else.
386 *
387 * Calls to the kernel memory allocator can not be made while holding
388 * callback_lock, as that would risk double tripping on callback_lock
389 * from one of the callbacks into the cpuset code from within
390 * __alloc_pages().
391 *
392 * If a task is only holding callback_lock, then it has read-only
393 * access to cpusets.
394 *
395 * The task_struct fields mems_allowed and mempolicy may be changed
396 * by another task, so we use alloc_lock in task_struct to protect
397 * them.
398 *
399 * The cpuset_common_file_read() handlers only hold callback_lock across
400 * small pieces of code, such as when reading out possibly multi-word
401 * cpumasks and nodemasks.
402 *
403 * Accessing a task's cpuset should be done in accordance with the
404 * guidelines for accessing subsystem state in kernel/cgroup.c
405 */
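/*
 * Sketch of the resulting update pattern (as used, for example, by
 * update_parent_subparts_cpumask() later in this file):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the change, allocate memory, compute new masks ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new cpumasks/nodemasks ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */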
406
407 static DEFINE_MUTEX(cpuset_mutex);
408
409 void cpuset_lock(void)
410 {
411 mutex_lock(&cpuset_mutex);
412 }
413
414 void cpuset_unlock(void)
415 {
416 mutex_unlock(&cpuset_mutex);
417 }
418
419 static DEFINE_SPINLOCK(callback_lock);
420
421 static struct workqueue_struct *cpuset_migrate_mm_wq;
422
423 /*
424 * CPU / memory hotplug is handled asynchronously.
425 */
426 static void cpuset_hotplug_workfn(struct work_struct *work);
427 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
428
429 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
430
431 static inline void check_insane_mems_config(nodemask_t *nodes)
432 {
433 if (!cpusets_insane_config() &&
434 movable_only_nodes(nodes)) {
435 static_branch_enable(&cpusets_insane_config_key);
436 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
437 "Cpuset allocations might fail even with a lot of memory available.\n",
438 nodemask_pr_args(nodes));
439 }
440 }
441
442 /*
443 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
444 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
445 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
446 * With v2 behavior, "cpus" and "mems" are always what the users have
447 * requested and won't be changed by hotplug events. Only the effective
448 * cpus or mems will be affected.
449 */
450 static inline bool is_in_v2_mode(void)
451 {
452 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
453 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
454 }
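/*
 * For example, with v2 behavior, writing "0-7" to cpuset.cpus keeps "0-7"
 * in cpuset.cpus even if some of those CPUs are later offlined; only
 * cpuset.cpus.effective shrinks accordingly.
 */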
455
456 /**
457 * partition_is_populated - check if partition has tasks
458 * @cs: partition root to be checked
459 * @excluded_child: a child cpuset to be excluded in task checking
460 * Return: true if there are tasks, false otherwise
461 *
462 * It is assumed that @cs is a valid partition root. @excluded_child should
463 * be non-NULL when this cpuset is going to become a partition itself.
464 */
465 static inline bool partition_is_populated(struct cpuset *cs,
466 struct cpuset *excluded_child)
467 {
468 struct cgroup_subsys_state *css;
469 struct cpuset *child;
470
471 if (cs->css.cgroup->nr_populated_csets)
472 return true;
473 if (!excluded_child && !cs->nr_subparts_cpus)
474 return cgroup_is_populated(cs->css.cgroup);
475
476 rcu_read_lock();
477 cpuset_for_each_child(child, css, cs) {
478 if (child == excluded_child)
479 continue;
480 if (is_partition_valid(child))
481 continue;
482 if (cgroup_is_populated(child->css.cgroup)) {
483 rcu_read_unlock();
484 return true;
485 }
486 }
487 rcu_read_unlock();
488 return false;
489 }
490
491 /*
492 * Return in pmask the portion of a task's cpuset's cpus_allowed that
493 * are online and are capable of running the task. If none are found,
494 * walk up the cpuset hierarchy until we find one that does have some
495 * appropriate cpus.
496 *
497 * One way or another, we guarantee to return some non-empty subset
498 * of cpu_online_mask.
499 *
500 * Call with callback_lock or cpuset_mutex held.
501 */
502 static void guarantee_online_cpus(struct task_struct *tsk,
503 struct cpumask *pmask)
504 {
505 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
506 struct cpuset *cs;
507
508 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
509 cpumask_copy(pmask, cpu_online_mask);
510
511 rcu_read_lock();
512 cs = task_cs(tsk);
513
514 while (!cpumask_intersects(cs->effective_cpus, pmask)) {
515 cs = parent_cs(cs);
516 if (unlikely(!cs)) {
517 /*
518 * The top cpuset doesn't have any online cpu as a
519 * consequence of a race between cpuset_hotplug_work
520 * and cpu hotplug notifier. But we know the top
521 * cpuset's effective_cpus is on its way to be
522 * identical to cpu_online_mask.
523 */
524 goto out_unlock;
525 }
526 }
527 cpumask_and(pmask, pmask, cs->effective_cpus);
528
529 out_unlock:
530 rcu_read_unlock();
531 }
532
533 /*
534 * Return in *pmask the portion of a cpuset's mems_allowed that
535 * are online, with memory. If none are online with memory, walk
536 * up the cpuset hierarchy until we find one that does have some
537 * online mems. The top cpuset always has some mems online.
538 *
539 * One way or another, we guarantee to return some non-empty subset
540 * of node_states[N_MEMORY].
541 *
542 * Call with callback_lock or cpuset_mutex held.
543 */
544 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
545 {
546 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
547 cs = parent_cs(cs);
548 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
549 }
550
551 /*
552 * update a task's spread flags to match the cpuset's page/slab spread flags
553 *
554 * Call with callback_lock or cpuset_mutex held. The check can be skipped
555 * if on default hierarchy.
556 */
557 static void cpuset_update_task_spread_flags(struct cpuset *cs,
558 struct task_struct *tsk)
559 {
560 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
561 return;
562
563 if (is_spread_page(cs))
564 task_set_spread_page(tsk);
565 else
566 task_clear_spread_page(tsk);
567
568 if (is_spread_slab(cs))
569 task_set_spread_slab(tsk);
570 else
571 task_clear_spread_slab(tsk);
572 }
573
574 /*
575 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
576 *
577 * One cpuset is a subset of another if all its allowed CPUs and
578 * Memory Nodes are a subset of the other, and its exclusive flags
579 * are only set if the other's are set. Call holding cpuset_mutex.
580 */
581
582 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
583 {
584 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
585 nodes_subset(p->mems_allowed, q->mems_allowed) &&
586 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
587 is_mem_exclusive(p) <= is_mem_exclusive(q);
588 }
589
590 /**
591 * alloc_cpumasks - allocate three cpumasks for cpuset
592 * @cs: the cpuset that has cpumasks to be allocated.
593 * @tmp: the tmpmasks structure pointer
594 * Return: 0 if successful, -ENOMEM otherwise.
595 *
596 * Only one of the two input arguments should be non-NULL.
597 */
598 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
599 {
600 cpumask_var_t *pmask1, *pmask2, *pmask3;
601
602 if (cs) {
603 pmask1 = &cs->cpus_allowed;
604 pmask2 = &cs->effective_cpus;
605 pmask3 = &cs->subparts_cpus;
606 } else {
607 pmask1 = &tmp->new_cpus;
608 pmask2 = &tmp->addmask;
609 pmask3 = &tmp->delmask;
610 }
611
612 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
613 return -ENOMEM;
614
615 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
616 goto free_one;
617
618 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
619 goto free_two;
620
621 return 0;
622
623 free_two:
624 free_cpumask_var(*pmask2);
625 free_one:
626 free_cpumask_var(*pmask1);
627 return -ENOMEM;
628 }
629
630 /**
631 * free_cpumasks - free cpumasks in a tmpmasks structure
632 * @cs: the cpuset that has cpumasks to be freed.
633 * @tmp: the tmpmasks structure pointer
634 */
635 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
636 {
637 if (cs) {
638 free_cpumask_var(cs->cpus_allowed);
639 free_cpumask_var(cs->effective_cpus);
640 free_cpumask_var(cs->subparts_cpus);
641 }
642 if (tmp) {
643 free_cpumask_var(tmp->new_cpus);
644 free_cpumask_var(tmp->addmask);
645 free_cpumask_var(tmp->delmask);
646 }
647 }
648
649 /**
650 * alloc_trial_cpuset - allocate a trial cpuset
651 * @cs: the cpuset that the trial cpuset duplicates
652 */
653 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
654 {
655 struct cpuset *trial;
656
657 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
658 if (!trial)
659 return NULL;
660
661 if (alloc_cpumasks(trial, NULL)) {
662 kfree(trial);
663 return NULL;
664 }
665
666 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
667 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
668 return trial;
669 }
670
671 /**
672 * free_cpuset - free the cpuset
673 * @cs: the cpuset to be freed
674 */
675 static inline void free_cpuset(struct cpuset *cs)
676 {
677 free_cpumasks(cs, NULL);
678 kfree(cs);
679 }
680
681 /*
682 * validate_change_legacy() - Validate conditions specific to legacy (v1)
683 * behavior.
684 */
685 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
686 {
687 struct cgroup_subsys_state *css;
688 struct cpuset *c, *par;
689 int ret;
690
691 WARN_ON_ONCE(!rcu_read_lock_held());
692
693 /* Each of our child cpusets must be a subset of us */
694 ret = -EBUSY;
695 cpuset_for_each_child(c, css, cur)
696 if (!is_cpuset_subset(c, trial))
697 goto out;
698
699 /* On legacy hierarchy, we must be a subset of our parent cpuset. */
700 ret = -EACCES;
701 par = parent_cs(cur);
702 if (par && !is_cpuset_subset(trial, par))
703 goto out;
704
705 ret = 0;
706 out:
707 return ret;
708 }
709
710 /*
711 * validate_change() - Used to validate that any proposed cpuset change
712 * follows the structural rules for cpusets.
713 *
714 * If we replaced the flag and mask values of the current cpuset
715 * (cur) with those values in the trial cpuset (trial), would
716 * our various subset and exclusive rules still be valid? Presumes
717 * cpuset_mutex held.
718 *
719 * 'cur' is the address of an actual, in-use cpuset. Operations
720 * such as list traversal that depend on the actual address of the
721 * cpuset in the list must use cur below, not trial.
722 *
723 * 'trial' is the address of bulk structure copy of cur, with
724 * perhaps one or more of the fields cpus_allowed, mems_allowed,
725 * or flags changed to new, trial values.
726 *
727 * Return 0 if valid, -errno if not.
728 */
729
730 static int validate_change(struct cpuset *cur, struct cpuset *trial)
731 {
732 struct cgroup_subsys_state *css;
733 struct cpuset *c, *par;
734 int ret = 0;
735
736 rcu_read_lock();
737
738 if (!is_in_v2_mode())
739 ret = validate_change_legacy(cur, trial);
740 if (ret)
741 goto out;
742
743 /* Remaining checks don't apply to root cpuset */
744 if (cur == &top_cpuset)
745 goto out;
746
747 par = parent_cs(cur);
748
749 /*
750 * Cpusets with tasks - existing or newly being attached - can't
751 * be changed to have empty cpus_allowed or mems_allowed.
752 */
753 ret = -ENOSPC;
754 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
755 if (!cpumask_empty(cur->cpus_allowed) &&
756 cpumask_empty(trial->cpus_allowed))
757 goto out;
758 if (!nodes_empty(cur->mems_allowed) &&
759 nodes_empty(trial->mems_allowed))
760 goto out;
761 }
762
763 /*
764 * We can't shrink if we won't have enough room for SCHED_DEADLINE
765 * tasks.
766 */
767 ret = -EBUSY;
768 if (is_cpu_exclusive(cur) &&
769 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
770 trial->cpus_allowed))
771 goto out;
772
773 /*
774 * If either I or some sibling (!= me) is exclusive, we can't
775 * overlap
776 */
777 ret = -EINVAL;
778 cpuset_for_each_child(c, css, par) {
779 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
780 c != cur &&
781 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
782 goto out;
783 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
784 c != cur &&
785 nodes_intersects(trial->mems_allowed, c->mems_allowed))
786 goto out;
787 }
788
789 ret = 0;
790 out:
791 rcu_read_unlock();
792 return ret;
793 }
794
795 #ifdef CONFIG_SMP
796 /*
797 * Helper routine for generate_sched_domains().
798 * Do cpusets a, b have overlapping effective_cpus masks?
799 */
800 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
801 {
802 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
803 }
804
805 static void
806 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
807 {
808 if (dattr->relax_domain_level < c->relax_domain_level)
809 dattr->relax_domain_level = c->relax_domain_level;
810 return;
811 }
812
813 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
814 struct cpuset *root_cs)
815 {
816 struct cpuset *cp;
817 struct cgroup_subsys_state *pos_css;
818
819 rcu_read_lock();
820 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
821 /* skip the whole subtree if @cp doesn't have any CPU */
822 if (cpumask_empty(cp->cpus_allowed)) {
823 pos_css = css_rightmost_descendant(pos_css);
824 continue;
825 }
826
827 if (is_sched_load_balance(cp))
828 update_domain_attr(dattr, cp);
829 }
830 rcu_read_unlock();
831 }
832
833 /* Must be called with cpuset_mutex held. */
834 static inline int nr_cpusets(void)
835 {
836 /* jump label reference count + the top-level cpuset */
837 return static_key_count(&cpusets_enabled_key.key) + 1;
838 }
839
840 /*
841 * generate_sched_domains()
842 *
843 * This function builds a partial partition of the system's CPUs.
844 * A 'partial partition' is a set of non-overlapping subsets whose
845 * union is a subset of that set.
846 * The output of this function needs to be passed to kernel/sched/core.c
847 * partition_sched_domains() routine, which will rebuild the scheduler's
848 * load balancing domains (sched domains) as specified by that partial
849 * partition.
850 *
851 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
852 * for a background explanation of this.
853 *
854 * Does not return errors, on the theory that the callers of this
855 * routine would rather not worry about failures to rebuild sched
856 * domains when operating in the severe memory shortage situations
857 * that could cause allocation failures below.
858 *
859 * Must be called with cpuset_mutex held.
860 *
861 * The three key local variables below are:
862 * cp - cpuset pointer, used (together with pos_css) to perform a
863 * top-down scan of all cpusets. For our purposes, rebuilding
864 * the scheduler's sched domains, we can ignore !is_sched_load_
865 * balance cpusets.
866 * csa - (for CpuSet Array) Array of pointers to all the cpusets
867 * that need to be load balanced, for convenient iterative
868 * access by the subsequent code that finds the best partition,
869 * i.e the set of domains (subsets) of CPUs such that the
870 * cpus_allowed of every cpuset marked is_sched_load_balance
871 * is a subset of one of these domains, while there are as
872 * many such domains as possible, each as small as possible.
873 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
874 * the kernel/sched/core.c routine partition_sched_domains() in a
875 * convenient format, that can be easily compared to the prior
876 * value to determine what partition elements (sched domains)
877 * were changed (added or removed.)
878 *
879 * Finding the best partition (set of domains):
880 * The triple nested loops below over i, j, k scan over the
881 * load balanced cpusets (using the array of cpuset pointers in
882 * csa[]) looking for pairs of cpusets that have overlapping
883 * cpus_allowed, but which don't have the same 'pn' partition
884 * number, and puts them in the same partition number. It keeps
885 * looping on the 'restart' label until it can no longer find
886 * any such pairs.
887 *
888 * The union of the cpus_allowed masks from the set of
889 * all cpusets having the same 'pn' value then form the one
890 * element of the partition (one sched domain) to be passed to
891 * partition_sched_domains().
892 */
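/*
 * Worked example (illustrative, not from the original description): with
 * three load-balanced cpusets A (cpus 0-1), B (cpus 1-2) and C (cpus 4-5)
 * in csa[], A and B overlap, so the restart loop merges their 'pn' values
 * into a single partition while C keeps its own; the result is ndoms == 2
 * with doms[] covering CPUs 0-2 and 4-5.
 */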
893 static int generate_sched_domains(cpumask_var_t **domains,
894 struct sched_domain_attr **attributes)
895 {
896 struct cpuset *cp; /* top-down scan of cpusets */
897 struct cpuset **csa; /* array of all cpuset ptrs */
898 int csn; /* how many cpuset ptrs in csa so far */
899 int i, j, k; /* indices for partition finding loops */
900 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
901 struct sched_domain_attr *dattr; /* attributes for custom domains */
902 int ndoms = 0; /* number of sched domains in result */
903 int nslot; /* next empty doms[] struct cpumask slot */
904 struct cgroup_subsys_state *pos_css;
905 bool root_load_balance = is_sched_load_balance(&top_cpuset);
906
907 doms = NULL;
908 dattr = NULL;
909 csa = NULL;
910
911 /* Special case for the 99% of systems with one, full, sched domain */
912 if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
913 ndoms = 1;
914 doms = alloc_sched_domains(ndoms);
915 if (!doms)
916 goto done;
917
918 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
919 if (dattr) {
920 *dattr = SD_ATTR_INIT;
921 update_domain_attr_tree(dattr, &top_cpuset);
922 }
923 cpumask_and(doms[0], top_cpuset.effective_cpus,
924 housekeeping_cpumask(HK_TYPE_DOMAIN));
925
926 goto done;
927 }
928
929 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
930 if (!csa)
931 goto done;
932 csn = 0;
933
934 rcu_read_lock();
935 if (root_load_balance)
936 csa[csn++] = &top_cpuset;
937 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
938 if (cp == &top_cpuset)
939 continue;
940 /*
941 * Continue traversing beyond @cp iff @cp has some CPUs and
942 * isn't load balancing. The former is obvious. The
943 * latter: All child cpusets contain a subset of the
944 * parent's cpus, so just skip them, and then we call
945 * update_domain_attr_tree() to calc relax_domain_level of
946 * the corresponding sched domain.
947 *
948 * If root is load-balancing, we can skip @cp if it
949 * is a subset of the root's effective_cpus.
950 */
951 if (!cpumask_empty(cp->cpus_allowed) &&
952 !(is_sched_load_balance(cp) &&
953 cpumask_intersects(cp->cpus_allowed,
954 housekeeping_cpumask(HK_TYPE_DOMAIN))))
955 continue;
956
957 if (root_load_balance &&
958 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
959 continue;
960
961 if (is_sched_load_balance(cp) &&
962 !cpumask_empty(cp->effective_cpus))
963 csa[csn++] = cp;
964
965 /* skip @cp's subtree if not a partition root */
966 if (!is_partition_valid(cp))
967 pos_css = css_rightmost_descendant(pos_css);
968 }
969 rcu_read_unlock();
970
971 for (i = 0; i < csn; i++)
972 csa[i]->pn = i;
973 ndoms = csn;
974
975 restart:
976 /* Find the best partition (set of sched domains) */
977 for (i = 0; i < csn; i++) {
978 struct cpuset *a = csa[i];
979 int apn = a->pn;
980
981 for (j = 0; j < csn; j++) {
982 struct cpuset *b = csa[j];
983 int bpn = b->pn;
984
985 if (apn != bpn && cpusets_overlap(a, b)) {
986 for (k = 0; k < csn; k++) {
987 struct cpuset *c = csa[k];
988
989 if (c->pn == bpn)
990 c->pn = apn;
991 }
992 ndoms--; /* one less element */
993 goto restart;
994 }
995 }
996 }
997
998 /*
999 * Now we know how many domains to create.
1000 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1001 */
1002 doms = alloc_sched_domains(ndoms);
1003 if (!doms)
1004 goto done;
1005
1006 /*
1007 * The rest of the code, including the scheduler, can deal with
1008 * dattr==NULL case. No need to abort if alloc fails.
1009 */
1010 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1011 GFP_KERNEL);
1012
1013 for (nslot = 0, i = 0; i < csn; i++) {
1014 struct cpuset *a = csa[i];
1015 struct cpumask *dp;
1016 int apn = a->pn;
1017
1018 if (apn < 0) {
1019 /* Skip completed partitions */
1020 continue;
1021 }
1022
1023 dp = doms[nslot];
1024
1025 if (nslot == ndoms) {
1026 static int warnings = 10;
1027 if (warnings) {
1028 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1029 nslot, ndoms, csn, i, apn);
1030 warnings--;
1031 }
1032 continue;
1033 }
1034
1035 cpumask_clear(dp);
1036 if (dattr)
1037 *(dattr + nslot) = SD_ATTR_INIT;
1038 for (j = i; j < csn; j++) {
1039 struct cpuset *b = csa[j];
1040
1041 if (apn == b->pn) {
1042 cpumask_or(dp, dp, b->effective_cpus);
1043 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1044 if (dattr)
1045 update_domain_attr_tree(dattr + nslot, b);
1046
1047 /* Done with this partition */
1048 b->pn = -1;
1049 }
1050 }
1051 nslot++;
1052 }
1053 BUG_ON(nslot != ndoms);
1054
1055 done:
1056 kfree(csa);
1057
1058 /*
1059 * Fallback to the default domain if kmalloc() failed.
1060 * See comments in partition_sched_domains().
1061 */
1062 if (doms == NULL)
1063 ndoms = 1;
1064
1065 *domains = doms;
1066 *attributes = dattr;
1067 return ndoms;
1068 }
1069
1070 static void dl_update_tasks_root_domain(struct cpuset *cs)
1071 {
1072 struct css_task_iter it;
1073 struct task_struct *task;
1074
1075 if (cs->nr_deadline_tasks == 0)
1076 return;
1077
1078 css_task_iter_start(&cs->css, 0, &it);
1079
1080 while ((task = css_task_iter_next(&it)))
1081 dl_add_task_root_domain(task);
1082
1083 css_task_iter_end(&it);
1084 }
1085
1086 static void dl_rebuild_rd_accounting(void)
1087 {
1088 struct cpuset *cs = NULL;
1089 struct cgroup_subsys_state *pos_css;
1090
1091 lockdep_assert_held(&cpuset_mutex);
1092 lockdep_assert_cpus_held();
1093 lockdep_assert_held(&sched_domains_mutex);
1094
1095 rcu_read_lock();
1096
1097 /*
1098 * Clear default root domain DL accounting, it will be computed again
1099 * if a task belongs to it.
1100 */
1101 dl_clear_root_domain(&def_root_domain);
1102
1103 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1104
1105 if (cpumask_empty(cs->effective_cpus)) {
1106 pos_css = css_rightmost_descendant(pos_css);
1107 continue;
1108 }
1109
1110 css_get(&cs->css);
1111
1112 rcu_read_unlock();
1113
1114 dl_update_tasks_root_domain(cs);
1115
1116 rcu_read_lock();
1117 css_put(&cs->css);
1118 }
1119 rcu_read_unlock();
1120 }
1121
1122 static void
1123 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1124 struct sched_domain_attr *dattr_new)
1125 {
1126 mutex_lock(&sched_domains_mutex);
1127 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1128 dl_rebuild_rd_accounting();
1129 mutex_unlock(&sched_domains_mutex);
1130 }
1131
1132 /*
1133 * Rebuild scheduler domains.
1134 *
1135 * If the flag 'sched_load_balance' of any cpuset with non-empty
1136 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1137 * which has that flag enabled, or if any cpuset with a non-empty
1138 * 'cpus' is removed, then call this routine to rebuild the
1139 * scheduler's dynamic sched domains.
1140 *
1141 * Call with cpuset_mutex held. Takes cpus_read_lock().
1142 */
1143 static void rebuild_sched_domains_locked(void)
1144 {
1145 struct cgroup_subsys_state *pos_css;
1146 struct sched_domain_attr *attr;
1147 cpumask_var_t *doms;
1148 struct cpuset *cs;
1149 int ndoms;
1150
1151 lockdep_assert_cpus_held();
1152 lockdep_assert_held(&cpuset_mutex);
1153
1154 /*
1155 * If we have raced with CPU hotplug, return early to avoid
1156 * passing doms with offlined cpu to partition_sched_domains().
1157 * Anyway, cpuset_hotplug_workfn() will rebuild sched domains.
1158 *
1159 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1160 * should be the same as the active CPUs, so checking only top_cpuset
1161 * is enough to detect racing CPU offlines.
1162 */
1163 if (!top_cpuset.nr_subparts_cpus &&
1164 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1165 return;
1166
1167 /*
1168 * With subpartition CPUs, however, the effective CPUs of a partition
1169 * root should be only a subset of the active CPUs. Since a CPU in any
1170 * partition root could be offlined, all must be checked.
1171 */
1172 if (top_cpuset.nr_subparts_cpus) {
1173 rcu_read_lock();
1174 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1175 if (!is_partition_valid(cs)) {
1176 pos_css = css_rightmost_descendant(pos_css);
1177 continue;
1178 }
1179 if (!cpumask_subset(cs->effective_cpus,
1180 cpu_active_mask)) {
1181 rcu_read_unlock();
1182 return;
1183 }
1184 }
1185 rcu_read_unlock();
1186 }
1187
1188 /* Generate domain masks and attrs */
1189 ndoms = generate_sched_domains(&doms, &attr);
1190
1191 /* Have scheduler rebuild the domains */
1192 partition_and_rebuild_sched_domains(ndoms, doms, attr);
1193 }
1194 #else /* !CONFIG_SMP */
1195 static void rebuild_sched_domains_locked(void)
1196 {
1197 }
1198 #endif /* CONFIG_SMP */
1199
1200 void rebuild_sched_domains(void)
1201 {
1202 cpus_read_lock();
1203 mutex_lock(&cpuset_mutex);
1204 rebuild_sched_domains_locked();
1205 mutex_unlock(&cpuset_mutex);
1206 cpus_read_unlock();
1207 }
1208
1209 /**
1210 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1211 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1212 * @new_cpus: the temp variable for the new effective_cpus mask
1213 *
1214 * Iterate through each task of @cs updating its cpus_allowed to the
1215 * effective cpuset's. As this function is called with cpuset_mutex held,
1216 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1217 * is used instead of effective_cpus to make sure all offline CPUs are also
1218 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1219 */
1220 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1221 {
1222 struct css_task_iter it;
1223 struct task_struct *task;
1224 bool top_cs = cs == &top_cpuset;
1225
1226 css_task_iter_start(&cs->css, 0, &it);
1227 while ((task = css_task_iter_next(&it))) {
1228 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1229
1230 if (top_cs) {
1231 /*
1232 * Percpu kthreads in top_cpuset are ignored
1233 */
1234 if (kthread_is_per_cpu(task))
1235 continue;
1236 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
1237 } else {
1238 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1239 }
1240 set_cpus_allowed_ptr(task, new_cpus);
1241 }
1242 css_task_iter_end(&it);
1243 }
1244
1245 /**
1246 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1247 * @new_cpus: the temp variable for the new effective_cpus mask
1248 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1249 * @parent: the parent cpuset
1250 *
1251 * If the parent has subpartition CPUs, include them in the list of
1252 * allowable CPUs in computing the new effective_cpus mask. Since offlined
1253 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1254 * to mask those out.
1255 */
1256 static void compute_effective_cpumask(struct cpumask *new_cpus,
1257 struct cpuset *cs, struct cpuset *parent)
1258 {
1259 if (parent->nr_subparts_cpus && is_partition_valid(cs)) {
1260 cpumask_or(new_cpus, parent->effective_cpus,
1261 parent->subparts_cpus);
1262 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1263 cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1264 } else {
1265 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1266 }
1267 }
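/*
 * Illustrative example (hypothetical values): if the parent has
 * effective_cpus 0-3 and subparts_cpus 4-5 with CPU 5 offline, a valid
 * partition child with cpus_allowed 2-5 gets new_cpus = (0-5) & (2-5) &
 * cpu_active_mask = 2-4, while a non-partition child with the same
 * cpus_allowed would get just 2-3 (cpus_allowed & parent->effective_cpus).
 */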
1268
1269 /*
1270 * Commands for update_parent_subparts_cpumask
1271 */
1272 enum subparts_cmd {
1273 partcmd_enable, /* Enable partition root */
1274 partcmd_disable, /* Disable partition root */
1275 partcmd_update, /* Update parent's subparts_cpus */
1276 partcmd_invalidate, /* Make partition invalid */
1277 };
1278
1279 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1280 int turning_on);
1281 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1282 struct tmpmasks *tmp);
1283
1284 /*
1285 * Update partition exclusive flag
1286 *
1287 * Return: 0 if successful, an error code otherwise
1288 */
1289 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1290 {
1291 bool exclusive = (new_prs > 0);
1292
1293 if (exclusive && !is_cpu_exclusive(cs)) {
1294 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1295 return PERR_NOTEXCL;
1296 } else if (!exclusive && is_cpu_exclusive(cs)) {
1297 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1298 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1299 }
1300 return 0;
1301 }
1302
1303 /*
1304 * Update partition load balance flag and/or rebuild sched domain
1305 *
1306 * Changing load balance flag will automatically call
1307 * rebuild_sched_domains_locked().
1308 * This function is for cgroup v2 only.
1309 */
1310 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1311 {
1312 int new_prs = cs->partition_root_state;
1313 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1314 bool new_lb;
1315
1316 /*
1317 * If cs is not a valid partition root, the load balance state
1318 * will follow its parent.
1319 */
1320 if (new_prs > 0) {
1321 new_lb = (new_prs != PRS_ISOLATED);
1322 } else {
1323 new_lb = is_sched_load_balance(parent_cs(cs));
1324 }
1325 if (new_lb != !!is_sched_load_balance(cs)) {
1326 rebuild_domains = true;
1327 if (new_lb)
1328 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1329 else
1330 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1331 }
1332
1333 if (rebuild_domains)
1334 rebuild_sched_domains_locked();
1335 }
1336
1337 /**
1338 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1339 * @cs: The cpuset that requests change in partition root state
1340 * @cmd: Partition root state change command
1341 * @newmask: Optional new cpumask for partcmd_update
1342 * @tmp: Temporary addmask and delmask
1343 * Return: 0 or a partition root state error code
1344 *
1345 * For partcmd_enable, the cpuset is being transformed from a non-partition
1346 * root to a partition root. The cpus_allowed mask of the given cpuset will
1347 * be put into parent's subparts_cpus and taken away from parent's
1348 * effective_cpus. The function will return 0 if all the CPUs listed in
1349 * cpus_allowed can be granted or an error code will be returned.
1350 *
1351 * For partcmd_disable, the cpuset is being transformed from a partition
1352 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1353 * parent's subparts_cpus will be taken away from that cpumask and put back
1354 * into parent's effective_cpus. 0 will always be returned.
1355 *
1356 * For partcmd_update, if the optional newmask is specified, the cpu list is
1357 * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1358 * assumed to remain the same. The cpuset should either be a valid or invalid
1359 * partition root. The partition root state may change from valid to invalid
1360 * or vice versa. An error code will only be returned if transitioning from
1361 * invalid to valid violates the exclusivity rule.
1362 *
1363 * For partcmd_invalidate, the current partition will be made invalid.
1364 *
1365 * The partcmd_enable and partcmd_disable commands are used by
1366 * update_prstate(). An error code may be returned and the caller will check
1367 * for error.
1368 *
1369 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1370 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1371 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1372 * check for error and so partition_root_state and prs_error will be updated
1373 * directly.
1374 */
1375 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1376 struct cpumask *newmask,
1377 struct tmpmasks *tmp)
1378 {
1379 struct cpuset *parent = parent_cs(cs);
1380 int adding; /* Moving cpus from effective_cpus to subparts_cpus */
1381 int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
1382 int old_prs, new_prs;
1383 int part_error = PERR_NONE; /* Partition error? */
1384
1385 lockdep_assert_held(&cpuset_mutex);
1386
1387 /*
1388 * The parent must be a partition root.
1389 * The new cpumask, if present, or the current cpus_allowed must
1390 * not be empty.
1391 */
1392 if (!is_partition_valid(parent)) {
1393 return is_partition_invalid(parent)
1394 ? PERR_INVPARENT : PERR_NOTPART;
1395 }
1396 if (!newmask && cpumask_empty(cs->cpus_allowed))
1397 return PERR_CPUSEMPTY;
1398
1399 /*
1400 * new_prs will only be changed for the partcmd_update and
1401 * partcmd_invalidate commands.
1402 */
1403 adding = deleting = false;
1404 old_prs = new_prs = cs->partition_root_state;
1405 if (cmd == partcmd_enable) {
1406 /*
1407 * Enabling partition root is not allowed if cpus_allowed
1408 * doesn't overlap parent's cpus_allowed.
1409 */
1410 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1411 return PERR_INVCPUS;
1412
1413 /*
1414 * A parent can be left with no CPU as long as there is no
1415 * task directly associated with the parent partition.
1416 */
1417 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1418 partition_is_populated(parent, cs))
1419 return PERR_NOCPUS;
1420
1421 cpumask_copy(tmp->addmask, cs->cpus_allowed);
1422 adding = true;
1423 } else if (cmd == partcmd_disable) {
1424 /*
1425 * Need to remove cpus from parent's subparts_cpus for valid
1426 * partition root.
1427 */
1428 deleting = !is_prs_invalid(old_prs) &&
1429 cpumask_and(tmp->delmask, cs->cpus_allowed,
1430 parent->subparts_cpus);
1431 } else if (cmd == partcmd_invalidate) {
1432 if (is_prs_invalid(old_prs))
1433 return 0;
1434
1435 /*
1436 * Make the current partition invalid. It is assumed that
1437 * invalidation is caused by violating cpu exclusivity rule.
1438 */
1439 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1440 parent->subparts_cpus);
1441 if (old_prs > 0) {
1442 new_prs = -old_prs;
1443 part_error = PERR_NOTEXCL;
1444 }
1445 } else if (newmask) {
1446 /*
1447 * partcmd_update with newmask:
1448 *
1449 * Compute add/delete mask to/from subparts_cpus
1450 *
1451 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1452 * addmask = newmask & parent->cpus_allowed
1453 * & ~parent->subparts_cpus
1454 */
1455 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1456 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1457 parent->subparts_cpus);
1458
1459 cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1460 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1461 parent->subparts_cpus);
1462 /*
1463 * Empty cpumask is not allowed
1464 */
1465 if (cpumask_empty(newmask)) {
1466 part_error = PERR_CPUSEMPTY;
1467 /*
1468 * Make partition invalid if parent's effective_cpus could
1469 * become empty and there are tasks in the parent.
1470 */
1471 } else if (adding &&
1472 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1473 !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1474 partition_is_populated(parent, cs)) {
1475 part_error = PERR_NOCPUS;
1476 adding = false;
1477 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1478 parent->subparts_cpus);
1479 }
1480 } else {
1481 /*
1482 * partcmd_update w/o newmask:
1483 *
1484 * delmask = cpus_allowed & parent->subparts_cpus
1485 * addmask = cpus_allowed & parent->cpus_allowed
1486 * & ~parent->subparts_cpus
1487 *
1488 * This gets invoked either due to a hotplug event or from
1489 * update_cpumasks_hier(). This can cause the state of a
1490 * partition root to transition from valid to invalid or vice
1491 * versa. So we still need to compute the addmask and delmask.
1492 *
1493 * A partition error happens when:
1494 * 1) Cpuset is valid partition, but parent does not distribute
1495 * out any CPUs.
1496 * 2) Parent has tasks and all its effective CPUs will have
1497 * to be distributed out.
1498 */
1499 cpumask_and(tmp->addmask, cs->cpus_allowed,
1500 parent->cpus_allowed);
1501 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1502 parent->subparts_cpus);
1503
1504 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1505 (adding &&
1506 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1507 partition_is_populated(parent, cs))) {
1508 part_error = PERR_NOCPUS;
1509 adding = false;
1510 }
1511
1512 if (part_error && is_partition_valid(cs) &&
1513 parent->nr_subparts_cpus)
1514 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1515 parent->subparts_cpus);
1516 }
1517 if (part_error)
1518 WRITE_ONCE(cs->prs_err, part_error);
1519
1520 if (cmd == partcmd_update) {
1521 /*
1522 * Check for possible transition between valid and invalid
1523 * partition root.
1524 */
1525 switch (cs->partition_root_state) {
1526 case PRS_ROOT:
1527 case PRS_ISOLATED:
1528 if (part_error)
1529 new_prs = -old_prs;
1530 break;
1531 case PRS_INVALID_ROOT:
1532 case PRS_INVALID_ISOLATED:
1533 if (!part_error)
1534 new_prs = -old_prs;
1535 break;
1536 }
1537 }
1538
1539 if (!adding && !deleting && (new_prs == old_prs))
1540 return 0;
1541
1542 /*
1543 * Transitioning between invalid to valid or vice versa may require
1544 * changing CS_CPU_EXCLUSIVE.
1545 */
1546 if (old_prs != new_prs) {
1547 int err = update_partition_exclusive(cs, new_prs);
1548
1549 if (err)
1550 return err;
1551 }
1552
1553 /*
1554 * Change the parent's subparts_cpus.
1555 * Newly added CPUs will be removed from effective_cpus and
1556 * newly deleted ones will be added back to effective_cpus.
1557 */
1558 spin_lock_irq(&callback_lock);
1559 if (adding) {
1560 cpumask_or(parent->subparts_cpus,
1561 parent->subparts_cpus, tmp->addmask);
1562 cpumask_andnot(parent->effective_cpus,
1563 parent->effective_cpus, tmp->addmask);
1564 }
1565 if (deleting) {
1566 cpumask_andnot(parent->subparts_cpus,
1567 parent->subparts_cpus, tmp->delmask);
1568 /*
1569 * Some of the CPUs in subparts_cpus might have been offlined.
1570 */
1571 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1572 cpumask_or(parent->effective_cpus,
1573 parent->effective_cpus, tmp->delmask);
1574 }
1575
1576 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1577
1578 if (old_prs != new_prs)
1579 cs->partition_root_state = new_prs;
1580
1581 spin_unlock_irq(&callback_lock);
1582
1583 if (adding || deleting) {
1584 update_tasks_cpumask(parent, tmp->addmask);
1585 if (parent->child_ecpus_count)
1586 update_sibling_cpumasks(parent, cs, tmp);
1587 }
1588
1589 /*
1590 * For partcmd_update without newmask, it is being called from
1591 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
1592 * Update the load balance flag and scheduling domain if
1593 * cpus_read_trylock() is successful.
1594 */
1595 if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
1596 update_partition_sd_lb(cs, old_prs);
1597 cpus_read_unlock();
1598 }
1599
1600 notify_partition_change(cs, old_prs);
1601 return 0;
1602 }
1603
1604 /*
1605 * update_cpumasks_hier() flags
1606 */
1607 #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
1608 #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
1609
1610 /*
1611 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1612 * @cs: the cpuset to consider
1613 * @tmp: temp variables for calculating effective_cpus & partition setup
1614 * @flags: HIER_* flags (HIER_CHECKALL means don't skip any descendant cpusets)
1615 *
1616 * When configured cpumask is changed, the effective cpumasks of this cpuset
1617 * and all its descendants need to be updated.
1618 *
1619 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1620 *
1621 * Called with cpuset_mutex held
1622 */
1623 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1624 int flags)
1625 {
1626 struct cpuset *cp;
1627 struct cgroup_subsys_state *pos_css;
1628 bool need_rebuild_sched_domains = false;
1629 int old_prs, new_prs;
1630
1631 rcu_read_lock();
1632 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1633 struct cpuset *parent = parent_cs(cp);
1634 bool update_parent = false;
1635
1636 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1637
1638 /*
1639 * If it becomes empty, inherit the effective mask of the
1640 * parent, which is guaranteed to have some CPUs unless
1641 * it is a partition root that has explicitly distributed
1642 * out all its CPUs.
1643 */
1644 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1645 if (is_partition_valid(cp) &&
1646 cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1647 goto update_parent_subparts;
1648
1649 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1650 if (!cp->use_parent_ecpus) {
1651 cp->use_parent_ecpus = true;
1652 parent->child_ecpus_count++;
1653 }
1654 } else if (cp->use_parent_ecpus) {
1655 cp->use_parent_ecpus = false;
1656 WARN_ON_ONCE(!parent->child_ecpus_count);
1657 parent->child_ecpus_count--;
1658 }
1659
1660 /*
1661 * Skip the whole subtree if
1662 * 1) the cpumask remains the same,
1663 * 2) has no partition root state,
1664 * 3) HIER_CHECKALL flag not set, and
1665 * 4) for v2 load balance state same as its parent.
1666 */
1667 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
1668 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
1669 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1670 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
1671 pos_css = css_rightmost_descendant(pos_css);
1672 continue;
1673 }
1674
1675 update_parent_subparts:
1676 /*
1677 * update_parent_subparts_cpumask() should have been called
1678 * for cs already in update_cpumask(). We should also call
1679 * update_tasks_cpumask() again for tasks in the parent
1680 * cpuset if the parent's subparts_cpus changes.
1681 */
1682 old_prs = new_prs = cp->partition_root_state;
1683 if ((cp != cs) && old_prs) {
1684 switch (parent->partition_root_state) {
1685 case PRS_ROOT:
1686 case PRS_ISOLATED:
1687 update_parent = true;
1688 break;
1689
1690 default:
1691 /*
1692 * When parent is not a partition root or is
1693 * invalid, child partition roots become
1694 * invalid too.
1695 */
1696 if (is_partition_valid(cp))
1697 new_prs = -cp->partition_root_state;
1698 WRITE_ONCE(cp->prs_err,
1699 is_partition_invalid(parent)
1700 ? PERR_INVPARENT : PERR_NOTPART);
1701 break;
1702 }
1703 }
1704
1705 if (!css_tryget_online(&cp->css))
1706 continue;
1707 rcu_read_unlock();
1708
1709 if (update_parent) {
1710 update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1711 tmp);
1712 /*
1713 * The cpuset partition_root_state may become
1714 * invalid. Capture it.
1715 */
1716 new_prs = cp->partition_root_state;
1717 }
1718
1719 spin_lock_irq(&callback_lock);
1720
1721 if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1722 /*
1723 * Put all active subparts_cpus back to effective_cpus.
1724 */
1725 cpumask_or(tmp->new_cpus, tmp->new_cpus,
1726 cp->subparts_cpus);
1727 cpumask_and(tmp->new_cpus, tmp->new_cpus,
1728 cpu_active_mask);
1729 cp->nr_subparts_cpus = 0;
1730 cpumask_clear(cp->subparts_cpus);
1731 }
1732
1733 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1734 if (cp->nr_subparts_cpus) {
1735 /*
1736 * Make sure that effective_cpus & subparts_cpus
1737 * are mutually exclusive.
1738 */
1739 cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1740 cp->subparts_cpus);
1741 }
1742
1743 cp->partition_root_state = new_prs;
1744 spin_unlock_irq(&callback_lock);
1745
1746 notify_partition_change(cp, old_prs);
1747
1748 WARN_ON(!is_in_v2_mode() &&
1749 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1750
1751 update_tasks_cpumask(cp, tmp->new_cpus);
1752
1753 /*
1754 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
1755 * from parent if current cpuset isn't a valid partition root
1756 * and their load balance states differ.
1757 */
1758 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1759 !is_partition_valid(cp) &&
1760 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
1761 if (is_sched_load_balance(parent))
1762 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1763 else
1764 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1765 }
1766
1767 /*
1768 * On legacy hierarchy, if the effective cpumask of any non-
1769 * empty cpuset is changed, we need to rebuild sched domains.
1770 * On default hierarchy, the cpuset needs to be a partition
1771 * root as well.
1772 */
1773 if (!cpumask_empty(cp->cpus_allowed) &&
1774 is_sched_load_balance(cp) &&
1775 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1776 is_partition_valid(cp)))
1777 need_rebuild_sched_domains = true;
1778
1779 rcu_read_lock();
1780 css_put(&cp->css);
1781 }
1782 rcu_read_unlock();
1783
1784 if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
1785 rebuild_sched_domains_locked();
1786 }
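/*
 * As a concrete illustration of the hierarchical update above (example
 * values only): if a child's cpuset.cpus is "2-5" while its parent's
 * effective_cpus is "0-3", the intersection computed by
 * compute_effective_cpumask() leaves the child with effective_cpus
 * "2-3".  Had the intersection been empty, the child would have fallen
 * back to its parent's effective_cpus on the default hierarchy, as
 * handled at the top of the loop.
 */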
1787
1788 /**
1789 * update_sibling_cpumasks - Update siblings cpumasks
1790 * @parent: Parent cpuset
1791 * @cs: Current cpuset
1792 * @tmp: Temp variables
1793 */
1794 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1795 struct tmpmasks *tmp)
1796 {
1797 struct cpuset *sibling;
1798 struct cgroup_subsys_state *pos_css;
1799
1800 lockdep_assert_held(&cpuset_mutex);
1801
1802 /*
1803 * Check all its siblings and call update_cpumasks_hier()
1804 * if their use_parent_ecpus flag is set in order for them
1805 * to use the right effective_cpus value.
1806 *
1807 * The update_cpumasks_hier() function may sleep. So we have to
1808 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
1809 * flag is used to suppress rebuild of sched domains as the callers
1810 * will take care of that.
1811 */
1812 rcu_read_lock();
1813 cpuset_for_each_child(sibling, pos_css, parent) {
1814 if (sibling == cs)
1815 continue;
1816 if (!sibling->use_parent_ecpus)
1817 continue;
1818 if (!css_tryget_online(&sibling->css))
1819 continue;
1820
1821 rcu_read_unlock();
1822 update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
1823 rcu_read_lock();
1824 css_put(&sibling->css);
1825 }
1826 rcu_read_unlock();
1827 }
1828
1829 /**
1830 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1831 * @cs: the cpuset to consider
1832 * @trialcs: trial cpuset
1833 * @buf: buffer of cpu numbers written to this cpuset
1834 */
1835 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1836 const char *buf)
1837 {
1838 int retval;
1839 struct tmpmasks tmp;
1840 bool invalidate = false;
1841 int old_prs = cs->partition_root_state;
1842
1843 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1844 if (cs == &top_cpuset)
1845 return -EACCES;
1846
1847 /*
1848 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1849 * Since cpulist_parse() fails on an empty mask, we special case
1850 * that parsing. The validate_change() call ensures that cpusets
1851 * with tasks have cpus.
1852 */
1853 if (!*buf) {
1854 cpumask_clear(trialcs->cpus_allowed);
1855 } else {
1856 retval = cpulist_parse(buf, trialcs->cpus_allowed);
1857 if (retval < 0)
1858 return retval;
1859
1860 if (!cpumask_subset(trialcs->cpus_allowed,
1861 top_cpuset.cpus_allowed))
1862 return -EINVAL;
1863 }
1864
1865 /* Nothing to do if the cpus didn't change */
1866 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1867 return 0;
1868
1869 if (alloc_cpumasks(NULL, &tmp))
1870 return -ENOMEM;
1871
1872 retval = validate_change(cs, trialcs);
1873
1874 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1875 struct cpuset *cp, *parent;
1876 struct cgroup_subsys_state *css;
1877
1878 /*
1879 * The -EINVAL error code indicates that partition sibling
1880 * CPU exclusivity rule has been violated. We still allow
1881 * the cpumask change to proceed while invalidating the
1882 * partition. However, any conflicting sibling partitions
1883 * have to be marked as invalid too.
1884 */
1885 invalidate = true;
1886 rcu_read_lock();
1887 parent = parent_cs(cs);
1888 cpuset_for_each_child(cp, css, parent)
1889 if (is_partition_valid(cp) &&
1890 cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1891 rcu_read_unlock();
1892 update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1893 rcu_read_lock();
1894 }
1895 rcu_read_unlock();
1896 retval = 0;
1897 }
1898 if (retval < 0)
1899 goto out_free;
1900
1901 if (cs->partition_root_state) {
1902 if (invalidate)
1903 update_parent_subparts_cpumask(cs, partcmd_invalidate,
1904 NULL, &tmp);
1905 else
1906 update_parent_subparts_cpumask(cs, partcmd_update,
1907 trialcs->cpus_allowed, &tmp);
1908 }
1909
1910 compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1911 parent_cs(cs));
1912 spin_lock_irq(&callback_lock);
1913 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1914
1915 /*
1916 * Make sure that subparts_cpus, if not empty, is a subset of
1917 * cpus_allowed. Clear subparts_cpus if the partition is not valid or
1918 * if it would be left with no effective CPUs while still having tasks.
1919 */
1920 if (cs->nr_subparts_cpus) {
1921 if (!is_partition_valid(cs) ||
1922 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1923 partition_is_populated(cs, NULL))) {
1924 cs->nr_subparts_cpus = 0;
1925 cpumask_clear(cs->subparts_cpus);
1926 } else {
1927 cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1928 cs->cpus_allowed);
1929 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1930 }
1931 }
1932 spin_unlock_irq(&callback_lock);
1933
1934 /* effective_cpus will be updated here */
1935 update_cpumasks_hier(cs, &tmp, 0);
1936
1937 if (cs->partition_root_state) {
1938 struct cpuset *parent = parent_cs(cs);
1939
1940 /*
1941 * For partition root, update the cpumasks of sibling
1942 * cpusets if they use parent's effective_cpus.
1943 */
1944 if (parent->child_ecpus_count)
1945 update_sibling_cpumasks(parent, cs, &tmp);
1946
1947 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
1948 update_partition_sd_lb(cs, old_prs);
1949 }
1950 out_free:
1951 free_cpumasks(NULL, &tmp);
1952 return retval;
1953 }
1954
1955 /*
1956 * Migrate memory region from one set of nodes to another. This is
1957 * performed asynchronously as it can be called from the process migration path
1958 * holding locks involved in process management. All mm migrations are
1959 * performed in the queued order and can be waited for by flushing
1960 * cpuset_migrate_mm_wq.
1961 */
1962
1963 struct cpuset_migrate_mm_work {
1964 struct work_struct work;
1965 struct mm_struct *mm;
1966 nodemask_t from;
1967 nodemask_t to;
1968 };
1969
1970 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1971 {
1972 struct cpuset_migrate_mm_work *mwork =
1973 container_of(work, struct cpuset_migrate_mm_work, work);
1974
1975 /* on a wq worker, no need to worry about %current's mems_allowed */
1976 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1977 mmput(mwork->mm);
1978 kfree(mwork);
1979 }
1980
1981 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1982 const nodemask_t *to)
1983 {
1984 struct cpuset_migrate_mm_work *mwork;
1985
1986 if (nodes_equal(*from, *to)) {
1987 mmput(mm);
1988 return;
1989 }
1990
1991 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1992 if (mwork) {
1993 mwork->mm = mm;
1994 mwork->from = *from;
1995 mwork->to = *to;
1996 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1997 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1998 } else {
1999 mmput(mm);
2000 }
2001 }
2002
2003 static void cpuset_post_attach(void)
2004 {
2005 flush_workqueue(cpuset_migrate_mm_wq);
2006 }
2007
2008 /*
2009 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2010 * @tsk: the task to change
2011 * @newmems: new nodes that the task will be set
2012 *
2013 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2014 * and rebind the task's mempolicy, if any. If the task is allocating in
2015 * parallel, it might temporarily see an empty intersection, which results in
2016 * a seqlock check and retry before OOM or allocation failure.
2017 */
2018 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2019 nodemask_t *newmems)
2020 {
2021 task_lock(tsk);
2022
2023 local_irq_disable();
2024 write_seqcount_begin(&tsk->mems_allowed_seq);
2025
2026 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2027 mpol_rebind_task(tsk, newmems);
2028 tsk->mems_allowed = *newmems;
2029
2030 write_seqcount_end(&tsk->mems_allowed_seq);
2031 local_irq_enable();
2032
2033 task_unlock(tsk);
2034 }
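/*
 * For reference, a sketch of the allocator-side reader that pairs with
 * the seqcount update above (the real helpers are
 * read_mems_allowed_begin() and read_mems_allowed_retry() in
 * include/linux/cpuset.h):
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_mems_allowed_begin();
 *		<allocate against current->mems_allowed>
 *	} while (read_mems_allowed_retry(seq));
 */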
2035
2036 static void *cpuset_being_rebound;
2037
2038 /**
2039 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2040 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2041 *
2042 * Iterate through each task of @cs updating its mems_allowed to the
2043 * cpuset's effective nodemask. As this function is called with cpuset_mutex held,
2044 * cpuset membership stays stable.
2045 */
2046 static void update_tasks_nodemask(struct cpuset *cs)
2047 {
2048 static nodemask_t newmems; /* protected by cpuset_mutex */
2049 struct css_task_iter it;
2050 struct task_struct *task;
2051
2052 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2053
2054 guarantee_online_mems(cs, &newmems);
2055
2056 /*
2057 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2058 * take while holding tasklist_lock. Forks can happen - the
2059 * mpol_dup() cpuset_being_rebound check will catch such forks,
2060 * and rebind their vma mempolicies too. Because we still hold
2061 * the global cpuset_mutex, we know that no other rebind effort
2062 * will be contending for the global variable cpuset_being_rebound.
2063 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2064 * is idempotent. Also migrate pages in each mm to new nodes.
2065 */
2066 css_task_iter_start(&cs->css, 0, &it);
2067 while ((task = css_task_iter_next(&it))) {
2068 struct mm_struct *mm;
2069 bool migrate;
2070
2071 cpuset_change_task_nodemask(task, &newmems);
2072
2073 mm = get_task_mm(task);
2074 if (!mm)
2075 continue;
2076
2077 migrate = is_memory_migrate(cs);
2078
2079 mpol_rebind_mm(mm, &cs->mems_allowed);
2080 if (migrate)
2081 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2082 else
2083 mmput(mm);
2084 }
2085 css_task_iter_end(&it);
2086
2087 /*
2088 * All the tasks' nodemasks have been updated, update
2089 * cs->old_mems_allowed.
2090 */
2091 cs->old_mems_allowed = newmems;
2092
2093 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2094 cpuset_being_rebound = NULL;
2095 }
2096
2097 /*
2098 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2099 * @cs: the cpuset to consider
2100 * @new_mems: a temp variable for calculating new effective_mems
2101 *
2102 * When configured nodemask is changed, the effective nodemasks of this cpuset
2103 * and all its descendants need to be updated.
2104 *
2105 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2106 *
2107 * Called with cpuset_mutex held
2108 */
2109 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2110 {
2111 struct cpuset *cp;
2112 struct cgroup_subsys_state *pos_css;
2113
2114 rcu_read_lock();
2115 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2116 struct cpuset *parent = parent_cs(cp);
2117
2118 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2119
2120 /*
2121 * If it becomes empty, inherit the effective mask of the
2122 * parent, which is guaranteed to have some MEMs.
2123 */
2124 if (is_in_v2_mode() && nodes_empty(*new_mems))
2125 *new_mems = parent->effective_mems;
2126
2127 /* Skip the whole subtree if the nodemask remains the same. */
2128 if (nodes_equal(*new_mems, cp->effective_mems)) {
2129 pos_css = css_rightmost_descendant(pos_css);
2130 continue;
2131 }
2132
2133 if (!css_tryget_online(&cp->css))
2134 continue;
2135 rcu_read_unlock();
2136
2137 spin_lock_irq(&callback_lock);
2138 cp->effective_mems = *new_mems;
2139 spin_unlock_irq(&callback_lock);
2140
2141 WARN_ON(!is_in_v2_mode() &&
2142 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2143
2144 update_tasks_nodemask(cp);
2145
2146 rcu_read_lock();
2147 css_put(&cp->css);
2148 }
2149 rcu_read_unlock();
2150 }
2151
2152 /*
2153 * Handle user request to change the 'mems' memory placement
2154 * of a cpuset. Needs to validate the request, update the
2155 * cpuset's mems_allowed, and for each task in the cpuset,
2156 * update mems_allowed and rebind the task's mempolicy and any vma
2157 * mempolicies; if the cpuset is marked 'memory_migrate',
2158 * migrate the tasks' pages to the new memory.
2159 *
2160 * Call with cpuset_mutex held. May take callback_lock during call.
2161 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2162 * lock each such task's mm->mmap_lock, scan its vmas and rebind
2163 * their mempolicies to the cpusets new mems_allowed.
2164 */
2165 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2166 const char *buf)
2167 {
2168 int retval;
2169
2170 /*
2171 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2172 * it's read-only
2173 */
2174 if (cs == &top_cpuset) {
2175 retval = -EACCES;
2176 goto done;
2177 }
2178
2179 /*
2180 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2181 * Since nodelist_parse() fails on an empty mask, we special case
2182 * that parsing. The validate_change() call ensures that cpusets
2183 * with tasks have memory.
2184 */
2185 if (!*buf) {
2186 nodes_clear(trialcs->mems_allowed);
2187 } else {
2188 retval = nodelist_parse(buf, trialcs->mems_allowed);
2189 if (retval < 0)
2190 goto done;
2191
2192 if (!nodes_subset(trialcs->mems_allowed,
2193 top_cpuset.mems_allowed)) {
2194 retval = -EINVAL;
2195 goto done;
2196 }
2197 }
2198
2199 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2200 retval = 0; /* Too easy - nothing to do */
2201 goto done;
2202 }
2203 retval = validate_change(cs, trialcs);
2204 if (retval < 0)
2205 goto done;
2206
2207 check_insane_mems_config(&trialcs->mems_allowed);
2208
2209 spin_lock_irq(&callback_lock);
2210 cs->mems_allowed = trialcs->mems_allowed;
2211 spin_unlock_irq(&callback_lock);
2212
2213 /* use trialcs->mems_allowed as a temp variable */
2214 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2215 done:
2216 return retval;
2217 }
2218
2219 bool current_cpuset_is_being_rebound(void)
2220 {
2221 bool ret;
2222
2223 rcu_read_lock();
2224 ret = task_cs(current) == cpuset_being_rebound;
2225 rcu_read_unlock();
2226
2227 return ret;
2228 }
2229
2230 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2231 {
2232 #ifdef CONFIG_SMP
2233 if (val < -1 || val > sched_domain_level_max + 1)
2234 return -EINVAL;
2235 #endif
2236
2237 if (val != cs->relax_domain_level) {
2238 cs->relax_domain_level = val;
2239 if (!cpumask_empty(cs->cpus_allowed) &&
2240 is_sched_load_balance(cs))
2241 rebuild_sched_domains_locked();
2242 }
2243
2244 return 0;
2245 }
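/*
 * For context, the value written to cpuset.sched_relax_domain_level
 * controls how widely the scheduler searches for an idle CPU when
 * waking or balancing a task: -1 keeps the system default, 0 disables
 * the request, and increasing values progressively widen the search
 * scope (SMT siblings, cores in a package, CPUs in a node, and so on);
 * see Documentation/admin-guide/cgroup-v1/cpusets.rst for the full
 * table.
 */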
2246
2247 /**
2248 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2249 * @cs: the cpuset in which each task's spread flags need to be changed
2250 *
2251 * Iterate through each task of @cs updating its spread flags. As this
2252 * function is called with cpuset_mutex held, cpuset membership stays
2253 * stable.
2254 */
2255 static void update_tasks_flags(struct cpuset *cs)
2256 {
2257 struct css_task_iter it;
2258 struct task_struct *task;
2259
2260 css_task_iter_start(&cs->css, 0, &it);
2261 while ((task = css_task_iter_next(&it)))
2262 cpuset_update_task_spread_flags(cs, task);
2263 css_task_iter_end(&it);
2264 }
2265
2266 /*
2267 * update_flag - read a 0 or a 1 in a file and update associated flag
2268 * bit: the bit to update (see cpuset_flagbits_t)
2269 * cs: the cpuset to update
2270 * turning_on: whether the flag is being set or cleared
2271 *
2272 * Call with cpuset_mutex held.
2273 */
2274
2275 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2276 int turning_on)
2277 {
2278 struct cpuset *trialcs;
2279 int balance_flag_changed;
2280 int spread_flag_changed;
2281 int err;
2282
2283 trialcs = alloc_trial_cpuset(cs);
2284 if (!trialcs)
2285 return -ENOMEM;
2286
2287 if (turning_on)
2288 set_bit(bit, &trialcs->flags);
2289 else
2290 clear_bit(bit, &trialcs->flags);
2291
2292 err = validate_change(cs, trialcs);
2293 if (err < 0)
2294 goto out;
2295
2296 balance_flag_changed = (is_sched_load_balance(cs) !=
2297 is_sched_load_balance(trialcs));
2298
2299 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2300 || (is_spread_page(cs) != is_spread_page(trialcs)));
2301
2302 spin_lock_irq(&callback_lock);
2303 cs->flags = trialcs->flags;
2304 spin_unlock_irq(&callback_lock);
2305
2306 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2307 rebuild_sched_domains_locked();
2308
2309 if (spread_flag_changed)
2310 update_tasks_flags(cs);
2311 out:
2312 free_cpuset(trialcs);
2313 return err;
2314 }
2315
2316 /**
2317 * update_prstate - update partition_root_state
2318 * @cs: the cpuset to update
2319 * @new_prs: new partition root state
2320 * Return: 0 if successful, != 0 if error
2321 *
2322 * Call with cpuset_mutex held.
2323 */
2324 static int update_prstate(struct cpuset *cs, int new_prs)
2325 {
2326 int err = PERR_NONE, old_prs = cs->partition_root_state;
2327 struct cpuset *parent = parent_cs(cs);
2328 struct tmpmasks tmpmask;
2329
2330 if (old_prs == new_prs)
2331 return 0;
2332
2333 /*
2334 * For a previously invalid partition root, leave it as invalid
2335 * if new_prs is not "member".
2336 */
2337 if (new_prs && is_prs_invalid(old_prs)) {
2338 cs->partition_root_state = -new_prs;
2339 return 0;
2340 }
2341
2342 if (alloc_cpumasks(NULL, &tmpmask))
2343 return -ENOMEM;
2344
2345 err = update_partition_exclusive(cs, new_prs);
2346 if (err)
2347 goto out;
2348
2349 if (!old_prs) {
2350 /*
2351 * cpus_allowed cannot be empty.
2352 */
2353 if (cpumask_empty(cs->cpus_allowed)) {
2354 err = PERR_CPUSEMPTY;
2355 goto out;
2356 }
2357
2358 err = update_parent_subparts_cpumask(cs, partcmd_enable,
2359 NULL, &tmpmask);
2360 } else if (old_prs && new_prs) {
2361 /*
2362 * A change in load balance state only, no change in cpumasks.
2363 */
2364 ;
2365 } else {
2366 /*
2367 * Switching back to member is always allowed even if it
2368 * disables child partitions.
2369 */
2370 update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2371 &tmpmask);
2372
2373 /*
2374 * If there are child partitions, they will all become invalid.
2375 */
2376 if (unlikely(cs->nr_subparts_cpus)) {
2377 spin_lock_irq(&callback_lock);
2378 cs->nr_subparts_cpus = 0;
2379 cpumask_clear(cs->subparts_cpus);
2380 compute_effective_cpumask(cs->effective_cpus, cs, parent);
2381 spin_unlock_irq(&callback_lock);
2382 }
2383 }
2384 out:
2385 /*
2386 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2387 * happens.
2388 */
2389 if (err) {
2390 new_prs = -new_prs;
2391 update_partition_exclusive(cs, new_prs);
2392 }
2393
2394 spin_lock_irq(&callback_lock);
2395 cs->partition_root_state = new_prs;
2396 WRITE_ONCE(cs->prs_err, err);
2397 spin_unlock_irq(&callback_lock);
2398
2399 /*
2400 * Update child cpusets, if present.
2401 * Force update if switching back to member.
2402 */
2403 if (!list_empty(&cs->css.children))
2404 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
2405
2406 /* Update sched domains and load balance flag */
2407 update_partition_sd_lb(cs, old_prs);
2408
2409 notify_partition_change(cs, old_prs);
2410 free_cpumasks(NULL, &tmpmask);
2411 return 0;
2412 }
2413
2414 /*
2415 * Frequency meter - How fast is some event occurring?
2416 *
2417 * These routines manage a digitally filtered, constant time based,
2418 * event frequency meter. There are four routines:
2419 * fmeter_init() - initialize a frequency meter.
2420 * fmeter_markevent() - called each time the event happens.
2421 * fmeter_getrate() - returns the recent rate of such events.
2422 * fmeter_update() - internal routine used to update fmeter.
2423 *
2424 * A common data structure is passed to each of these routines,
2425 * which is used to keep track of the state required to manage the
2426 * frequency meter and its digital filter.
2427 *
2428 * The filter works on the number of events marked per unit time.
2429 * The filter is single-pole low-pass recursive (IIR). The time unit
2430 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2431 * simulate 3 decimal digits of precision (multiplied by 1000).
2432 *
2433 * With an FM_COEF of 933, and a time base of 1 second, the filter
2434 * has a half-life of 10 seconds, meaning that if the events quit
2435 * happening, then the rate returned from the fmeter_getrate()
2436 * will be cut in half each 10 seconds, until it converges to zero.
2437 *
2438 * It is not worth doing a real infinitely recursive filter. If more
2439 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2440 * just compute FM_MAXTICKS ticks worth, by which point the level
2441 * will be stable.
2442 *
2443 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2444 * arithmetic overflow in the fmeter_update() routine.
2445 *
2446 * Given the simple 32 bit integer arithmetic used, this meter works
2447 * best for reporting rates between one per millisecond (msec) and
2448 * one per 32 (approx) seconds. At constant rates faster than one
2449 * per msec it maxes out at values just under 1,000,000. At constant
2450 * rates between one per msec, and one per second it will stabilize
2451 * to a value N*1000, where N is the rate of events per second.
2452 * At constant rates between one per second and one per 32 seconds,
2453 * it will be choppy, moving up on the seconds that have an event,
2454 * and then decaying until the next event. At rates slower than
2455 * about one in 32 seconds, it decays all the way back to zero between
2456 * each event.
2457 */
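/*
 * Illustrative arithmetic for the constants below (a sanity check, not
 * part of the algorithm): the per-second decay factor is
 * FM_COEF/FM_SCALE = 0.933 and 0.933^10 ~= 0.5, which gives the
 * 10 second half-life quoted above.  At a steady one event per second,
 * each update adds (1000 - 933) * 1000 / 1000 = 67 and decays the old
 * value by 6.7%, so the meter settles at 67 / 0.067 = 1000, i.e. the
 * N*1000 behaviour described above with N = 1.
 */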
2458
2459 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
2460 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
2461 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
2462 #define FM_SCALE 1000 /* faux fixed point scale */
2463
2464 /* Initialize a frequency meter */
2465 static void fmeter_init(struct fmeter *fmp)
2466 {
2467 fmp->cnt = 0;
2468 fmp->val = 0;
2469 fmp->time = 0;
2470 spin_lock_init(&fmp->lock);
2471 }
2472
2473 /* Internal meter update - process cnt events and update value */
2474 static void fmeter_update(struct fmeter *fmp)
2475 {
2476 time64_t now;
2477 u32 ticks;
2478
2479 now = ktime_get_seconds();
2480 ticks = now - fmp->time;
2481
2482 if (ticks == 0)
2483 return;
2484
2485 ticks = min(FM_MAXTICKS, ticks);
2486 while (ticks-- > 0)
2487 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2488 fmp->time = now;
2489
2490 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2491 fmp->cnt = 0;
2492 }
2493
2494 /* Process any previous ticks, then bump cnt by one (times scale). */
2495 static void fmeter_markevent(struct fmeter *fmp)
2496 {
2497 spin_lock(&fmp->lock);
2498 fmeter_update(fmp);
2499 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2500 spin_unlock(&fmp->lock);
2501 }
2502
2503 /* Process any previous ticks, then return current value. */
2504 static int fmeter_getrate(struct fmeter *fmp)
2505 {
2506 int val;
2507
2508 spin_lock(&fmp->lock);
2509 fmeter_update(fmp);
2510 val = fmp->val;
2511 spin_unlock(&fmp->lock);
2512 return val;
2513 }
2514
2515 static struct cpuset *cpuset_attach_old_cs;
2516
2517 /*
2518 * Check to see if a cpuset can accept a new task
2519 * For v1, cpus_allowed and mems_allowed can't be empty.
2520 * For v2, effective_cpus can't be empty.
2521 * Note that in v1, effective_cpus = cpus_allowed.
2522 */
2523 static int cpuset_can_attach_check(struct cpuset *cs)
2524 {
2525 if (cpumask_empty(cs->effective_cpus) ||
2526 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2527 return -ENOSPC;
2528 return 0;
2529 }
2530
2531 static void reset_migrate_dl_data(struct cpuset *cs)
2532 {
2533 cs->nr_migrate_dl_tasks = 0;
2534 cs->sum_migrate_dl_bw = 0;
2535 }
2536
2537 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2538 static int cpuset_can_attach(struct cgroup_taskset *tset)
2539 {
2540 struct cgroup_subsys_state *css;
2541 struct cpuset *cs, *oldcs;
2542 struct task_struct *task;
2543 bool cpus_updated, mems_updated;
2544 int ret;
2545
2546 /* used later by cpuset_attach() */
2547 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2548 oldcs = cpuset_attach_old_cs;
2549 cs = css_cs(css);
2550
2551 mutex_lock(&cpuset_mutex);
2552
2553 /* Check to see if task is allowed in the cpuset */
2554 ret = cpuset_can_attach_check(cs);
2555 if (ret)
2556 goto out_unlock;
2557
2558 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2559 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2560
2561 cgroup_taskset_for_each(task, css, tset) {
2562 ret = task_can_attach(task);
2563 if (ret)
2564 goto out_unlock;
2565
2566 /*
2567 * Skip the rights-over-task check in v2 when nothing changes;
2568 * migration permission derives from hierarchy ownership in
2569 * cgroup_procs_write_permission().
2570 */
2571 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2572 (cpus_updated || mems_updated)) {
2573 ret = security_task_setscheduler(task);
2574 if (ret)
2575 goto out_unlock;
2576 }
2577
2578 if (dl_task(task)) {
2579 cs->nr_migrate_dl_tasks++;
2580 cs->sum_migrate_dl_bw += task->dl.dl_bw;
2581 }
2582 }
2583
2584 if (!cs->nr_migrate_dl_tasks)
2585 goto out_success;
2586
2587 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2588 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2589
2590 if (unlikely(cpu >= nr_cpu_ids)) {
2591 reset_migrate_dl_data(cs);
2592 ret = -EINVAL;
2593 goto out_unlock;
2594 }
2595
2596 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2597 if (ret) {
2598 reset_migrate_dl_data(cs);
2599 goto out_unlock;
2600 }
2601 }
2602
2603 out_success:
2604 /*
2605 * Mark that attach is in progress. This makes validate_change() fail
2606 * changes which zero cpus/mems_allowed.
2607 */
2608 cs->attach_in_progress++;
2609 out_unlock:
2610 mutex_unlock(&cpuset_mutex);
2611 return ret;
2612 }
2613
2614 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2615 {
2616 struct cgroup_subsys_state *css;
2617 struct cpuset *cs;
2618
2619 cgroup_taskset_first(tset, &css);
2620 cs = css_cs(css);
2621
2622 mutex_lock(&cpuset_mutex);
2623 cs->attach_in_progress--;
2624 if (!cs->attach_in_progress)
2625 wake_up(&cpuset_attach_wq);
2626
2627 if (cs->nr_migrate_dl_tasks) {
2628 int cpu = cpumask_any(cs->effective_cpus);
2629
2630 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
2631 reset_migrate_dl_data(cs);
2632 }
2633
2634 mutex_unlock(&cpuset_mutex);
2635 }
2636
2637 /*
2638 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
2639 * but we can't allocate it dynamically there. Define it as a global and
2640 * allocate it in cpuset_init().
2641 */
2642 static cpumask_var_t cpus_attach;
2643 static nodemask_t cpuset_attach_nodemask_to;
2644
2645 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2646 {
2647 lockdep_assert_held(&cpuset_mutex);
2648
2649 if (cs != &top_cpuset)
2650 guarantee_online_cpus(task, cpus_attach);
2651 else
2652 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
2653 cs->subparts_cpus);
2654 /*
2655 * can_attach beforehand should guarantee that this doesn't
2656 * fail. TODO: have a better way to handle failure here
2657 */
2658 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2659
2660 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2661 cpuset_update_task_spread_flags(cs, task);
2662 }
2663
2664 static void cpuset_attach(struct cgroup_taskset *tset)
2665 {
2666 struct task_struct *task;
2667 struct task_struct *leader;
2668 struct cgroup_subsys_state *css;
2669 struct cpuset *cs;
2670 struct cpuset *oldcs = cpuset_attach_old_cs;
2671 bool cpus_updated, mems_updated;
2672
2673 cgroup_taskset_first(tset, &css);
2674 cs = css_cs(css);
2675
2676 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
2677 mutex_lock(&cpuset_mutex);
2678 cpus_updated = !cpumask_equal(cs->effective_cpus,
2679 oldcs->effective_cpus);
2680 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2681
2682 /*
2683 * In the default hierarchy, enabling cpuset in the child cgroups
2684 * will trigger a number of cpuset_attach() calls with no change
2685 * in effective cpus and mems. In that case, we can optimize out
2686 * by skipping the task iteration and update.
2687 */
2688 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2689 !cpus_updated && !mems_updated) {
2690 cpuset_attach_nodemask_to = cs->effective_mems;
2691 goto out;
2692 }
2693
2694 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2695
2696 cgroup_taskset_for_each(task, css, tset)
2697 cpuset_attach_task(cs, task);
2698
2699 /*
2700 * Change mm for all threadgroup leaders. This is expensive and may
2701 * sleep and should be moved outside the migration path proper. Skip it
2702 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
2703 * not set.
2704 */
2705 cpuset_attach_nodemask_to = cs->effective_mems;
2706 if (!is_memory_migrate(cs) && !mems_updated)
2707 goto out;
2708
2709 cgroup_taskset_for_each_leader(leader, css, tset) {
2710 struct mm_struct *mm = get_task_mm(leader);
2711
2712 if (mm) {
2713 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2714
2715 /*
2716 * old_mems_allowed is the same with mems_allowed
2717 * here, except if this task is being moved
2718 * automatically due to hotplug. In that case
2719 * @mems_allowed has been updated and is empty, so
2720 * @old_mems_allowed is the right nodeset that we
2721 * migrate mm from.
2722 */
2723 if (is_memory_migrate(cs))
2724 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2725 &cpuset_attach_nodemask_to);
2726 else
2727 mmput(mm);
2728 }
2729 }
2730
2731 out:
2732 cs->old_mems_allowed = cpuset_attach_nodemask_to;
2733
2734 if (cs->nr_migrate_dl_tasks) {
2735 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
2736 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
2737 reset_migrate_dl_data(cs);
2738 }
2739
2740 cs->attach_in_progress--;
2741 if (!cs->attach_in_progress)
2742 wake_up(&cpuset_attach_wq);
2743
2744 mutex_unlock(&cpuset_mutex);
2745 }
2746
2747 /* The various types of files and directories in a cpuset file system */
2748
2749 typedef enum {
2750 FILE_MEMORY_MIGRATE,
2751 FILE_CPULIST,
2752 FILE_MEMLIST,
2753 FILE_EFFECTIVE_CPULIST,
2754 FILE_EFFECTIVE_MEMLIST,
2755 FILE_SUBPARTS_CPULIST,
2756 FILE_CPU_EXCLUSIVE,
2757 FILE_MEM_EXCLUSIVE,
2758 FILE_MEM_HARDWALL,
2759 FILE_SCHED_LOAD_BALANCE,
2760 FILE_PARTITION_ROOT,
2761 FILE_SCHED_RELAX_DOMAIN_LEVEL,
2762 FILE_MEMORY_PRESSURE_ENABLED,
2763 FILE_MEMORY_PRESSURE,
2764 FILE_SPREAD_PAGE,
2765 FILE_SPREAD_SLAB,
2766 } cpuset_filetype_t;
2767
2768 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2769 u64 val)
2770 {
2771 struct cpuset *cs = css_cs(css);
2772 cpuset_filetype_t type = cft->private;
2773 int retval = 0;
2774
2775 cpus_read_lock();
2776 mutex_lock(&cpuset_mutex);
2777 if (!is_cpuset_online(cs)) {
2778 retval = -ENODEV;
2779 goto out_unlock;
2780 }
2781
2782 switch (type) {
2783 case FILE_CPU_EXCLUSIVE:
2784 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2785 break;
2786 case FILE_MEM_EXCLUSIVE:
2787 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2788 break;
2789 case FILE_MEM_HARDWALL:
2790 retval = update_flag(CS_MEM_HARDWALL, cs, val);
2791 break;
2792 case FILE_SCHED_LOAD_BALANCE:
2793 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2794 break;
2795 case FILE_MEMORY_MIGRATE:
2796 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2797 break;
2798 case FILE_MEMORY_PRESSURE_ENABLED:
2799 cpuset_memory_pressure_enabled = !!val;
2800 break;
2801 case FILE_SPREAD_PAGE:
2802 retval = update_flag(CS_SPREAD_PAGE, cs, val);
2803 break;
2804 case FILE_SPREAD_SLAB:
2805 retval = update_flag(CS_SPREAD_SLAB, cs, val);
2806 break;
2807 default:
2808 retval = -EINVAL;
2809 break;
2810 }
2811 out_unlock:
2812 mutex_unlock(&cpuset_mutex);
2813 cpus_read_unlock();
2814 return retval;
2815 }
2816
2817 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2818 s64 val)
2819 {
2820 struct cpuset *cs = css_cs(css);
2821 cpuset_filetype_t type = cft->private;
2822 int retval = -ENODEV;
2823
2824 cpus_read_lock();
2825 mutex_lock(&cpuset_mutex);
2826 if (!is_cpuset_online(cs))
2827 goto out_unlock;
2828
2829 switch (type) {
2830 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2831 retval = update_relax_domain_level(cs, val);
2832 break;
2833 default:
2834 retval = -EINVAL;
2835 break;
2836 }
2837 out_unlock:
2838 mutex_unlock(&cpuset_mutex);
2839 cpus_read_unlock();
2840 return retval;
2841 }
2842
2843 /*
2844 * Common handling for a write to a "cpus" or "mems" file.
2845 */
2846 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2847 char *buf, size_t nbytes, loff_t off)
2848 {
2849 struct cpuset *cs = css_cs(of_css(of));
2850 struct cpuset *trialcs;
2851 int retval = -ENODEV;
2852
2853 buf = strstrip(buf);
2854
2855 /*
2856 * CPU or memory hotunplug may leave @cs w/o any execution
2857 * resources, in which case the hotplug code asynchronously updates
2858 * configuration and transfers all tasks to the nearest ancestor
2859 * which can execute.
2860 *
2861 * As writes to "cpus" or "mems" may restore @cs's execution
2862 * resources, wait for the previously scheduled operations before
2863 * proceeding, so that we don't end up repeatedly removing tasks added
2864 * after execution capability is restored.
2865 *
2866 * cpuset_hotplug_work calls back into cgroup core via
2867 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2868 * operation like this one can lead to a deadlock through kernfs
2869 * active_ref protection. Let's break the protection. Losing the
2870 * protection is okay as we check whether @cs is online after
2871 * grabbing cpuset_mutex anyway. This only happens on the legacy
2872 * hierarchies.
2873 */
2874 css_get(&cs->css);
2875 kernfs_break_active_protection(of->kn);
2876 flush_work(&cpuset_hotplug_work);
2877
2878 cpus_read_lock();
2879 mutex_lock(&cpuset_mutex);
2880 if (!is_cpuset_online(cs))
2881 goto out_unlock;
2882
2883 trialcs = alloc_trial_cpuset(cs);
2884 if (!trialcs) {
2885 retval = -ENOMEM;
2886 goto out_unlock;
2887 }
2888
2889 switch (of_cft(of)->private) {
2890 case FILE_CPULIST:
2891 retval = update_cpumask(cs, trialcs, buf);
2892 break;
2893 case FILE_MEMLIST:
2894 retval = update_nodemask(cs, trialcs, buf);
2895 break;
2896 default:
2897 retval = -EINVAL;
2898 break;
2899 }
2900
2901 free_cpuset(trialcs);
2902 out_unlock:
2903 mutex_unlock(&cpuset_mutex);
2904 cpus_read_unlock();
2905 kernfs_unbreak_active_protection(of->kn);
2906 css_put(&cs->css);
2907 flush_workqueue(cpuset_migrate_mm_wq);
2908 return retval ?: nbytes;
2909 }
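/*
 * For example, from userspace the placement of a cpuset is configured
 * by writing standard list-format masks to these files, e.g. on the
 * default (v2) hierarchy, assuming the cpuset controller is enabled
 * for the cgroup:
 *
 *	echo 0-3 > cpuset.cpus
 *	echo 0,1 > cpuset.mems
 */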
2910
2911 /*
2912 * These ASCII lists should be read in a single call, using a user
2913 * buffer large enough to hold the entire map. If read in smaller
2914 * chunks, there is no guarantee of atomicity. Since the display format
2915 * used, list of ranges of sequential numbers, is variable length,
2916 * and since these maps can change value dynamically, one could read
2917 * gibberish by doing partial reads while a list was changing.
2918 */
2919 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2920 {
2921 struct cpuset *cs = css_cs(seq_css(sf));
2922 cpuset_filetype_t type = seq_cft(sf)->private;
2923 int ret = 0;
2924
2925 spin_lock_irq(&callback_lock);
2926
2927 switch (type) {
2928 case FILE_CPULIST:
2929 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2930 break;
2931 case FILE_MEMLIST:
2932 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2933 break;
2934 case FILE_EFFECTIVE_CPULIST:
2935 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2936 break;
2937 case FILE_EFFECTIVE_MEMLIST:
2938 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2939 break;
2940 case FILE_SUBPARTS_CPULIST:
2941 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2942 break;
2943 default:
2944 ret = -EINVAL;
2945 }
2946
2947 spin_unlock_irq(&callback_lock);
2948 return ret;
2949 }
2950
2951 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2952 {
2953 struct cpuset *cs = css_cs(css);
2954 cpuset_filetype_t type = cft->private;
2955 switch (type) {
2956 case FILE_CPU_EXCLUSIVE:
2957 return is_cpu_exclusive(cs);
2958 case FILE_MEM_EXCLUSIVE:
2959 return is_mem_exclusive(cs);
2960 case FILE_MEM_HARDWALL:
2961 return is_mem_hardwall(cs);
2962 case FILE_SCHED_LOAD_BALANCE:
2963 return is_sched_load_balance(cs);
2964 case FILE_MEMORY_MIGRATE:
2965 return is_memory_migrate(cs);
2966 case FILE_MEMORY_PRESSURE_ENABLED:
2967 return cpuset_memory_pressure_enabled;
2968 case FILE_MEMORY_PRESSURE:
2969 return fmeter_getrate(&cs->fmeter);
2970 case FILE_SPREAD_PAGE:
2971 return is_spread_page(cs);
2972 case FILE_SPREAD_SLAB:
2973 return is_spread_slab(cs);
2974 default:
2975 BUG();
2976 }
2977
2978 /* Unreachable but makes gcc happy */
2979 return 0;
2980 }
2981
2982 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2983 {
2984 struct cpuset *cs = css_cs(css);
2985 cpuset_filetype_t type = cft->private;
2986 switch (type) {
2987 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2988 return cs->relax_domain_level;
2989 default:
2990 BUG();
2991 }
2992
2993 /* Unreachable but makes gcc happy */
2994 return 0;
2995 }
2996
2997 static int sched_partition_show(struct seq_file *seq, void *v)
2998 {
2999 struct cpuset *cs = css_cs(seq_css(seq));
3000 const char *err, *type = NULL;
3001
3002 switch (cs->partition_root_state) {
3003 case PRS_ROOT:
3004 seq_puts(seq, "root\n");
3005 break;
3006 case PRS_ISOLATED:
3007 seq_puts(seq, "isolated\n");
3008 break;
3009 case PRS_MEMBER:
3010 seq_puts(seq, "member\n");
3011 break;
3012 case PRS_INVALID_ROOT:
3013 type = "root";
3014 fallthrough;
3015 case PRS_INVALID_ISOLATED:
3016 if (!type)
3017 type = "isolated";
3018 err = perr_strings[READ_ONCE(cs->prs_err)];
3019 if (err)
3020 seq_printf(seq, "%s invalid (%s)\n", type, err);
3021 else
3022 seq_printf(seq, "%s invalid\n", type);
3023 break;
3024 }
3025 return 0;
3026 }
3027
3028 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3029 size_t nbytes, loff_t off)
3030 {
3031 struct cpuset *cs = css_cs(of_css(of));
3032 int val;
3033 int retval = -ENODEV;
3034
3035 buf = strstrip(buf);
3036
3037 /*
3038 * Convert "root" to ENABLED, and convert "member" to DISABLED.
3039 */
3040 if (!strcmp(buf, "root"))
3041 val = PRS_ROOT;
3042 else if (!strcmp(buf, "member"))
3043 val = PRS_MEMBER;
3044 else if (!strcmp(buf, "isolated"))
3045 val = PRS_ISOLATED;
3046 else
3047 return -EINVAL;
3048
3049 css_get(&cs->css);
3050 cpus_read_lock();
3051 mutex_lock(&cpuset_mutex);
3052 if (!is_cpuset_online(cs))
3053 goto out_unlock;
3054
3055 retval = update_prstate(cs, val);
3056 out_unlock:
3057 mutex_unlock(&cpuset_mutex);
3058 cpus_read_unlock();
3059 css_put(&cs->css);
3060 return retval ?: nbytes;
3061 }
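/*
 * Example usage from userspace (default hierarchy): a cpuset is turned
 * into a partition root by writing one of the accepted keywords to
 * cpuset.cpus.partition, e.g.
 *
 *	echo root     > cpuset.cpus.partition
 *	echo isolated > cpuset.cpus.partition
 *	echo member   > cpuset.cpus.partition
 *
 * Reading the file back reports the current state, including the
 * "<type> invalid (<reason>)" form produced by sched_partition_show()
 * above when a partition has become invalid.
 */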
3062
3063 /*
3064 * for the common functions, 'private' gives the type of file
3065 */
3066
3067 static struct cftype legacy_files[] = {
3068 {
3069 .name = "cpus",
3070 .seq_show = cpuset_common_seq_show,
3071 .write = cpuset_write_resmask,
3072 .max_write_len = (100U + 6 * NR_CPUS),
3073 .private = FILE_CPULIST,
3074 },
3075
3076 {
3077 .name = "mems",
3078 .seq_show = cpuset_common_seq_show,
3079 .write = cpuset_write_resmask,
3080 .max_write_len = (100U + 6 * MAX_NUMNODES),
3081 .private = FILE_MEMLIST,
3082 },
3083
3084 {
3085 .name = "effective_cpus",
3086 .seq_show = cpuset_common_seq_show,
3087 .private = FILE_EFFECTIVE_CPULIST,
3088 },
3089
3090 {
3091 .name = "effective_mems",
3092 .seq_show = cpuset_common_seq_show,
3093 .private = FILE_EFFECTIVE_MEMLIST,
3094 },
3095
3096 {
3097 .name = "cpu_exclusive",
3098 .read_u64 = cpuset_read_u64,
3099 .write_u64 = cpuset_write_u64,
3100 .private = FILE_CPU_EXCLUSIVE,
3101 },
3102
3103 {
3104 .name = "mem_exclusive",
3105 .read_u64 = cpuset_read_u64,
3106 .write_u64 = cpuset_write_u64,
3107 .private = FILE_MEM_EXCLUSIVE,
3108 },
3109
3110 {
3111 .name = "mem_hardwall",
3112 .read_u64 = cpuset_read_u64,
3113 .write_u64 = cpuset_write_u64,
3114 .private = FILE_MEM_HARDWALL,
3115 },
3116
3117 {
3118 .name = "sched_load_balance",
3119 .read_u64 = cpuset_read_u64,
3120 .write_u64 = cpuset_write_u64,
3121 .private = FILE_SCHED_LOAD_BALANCE,
3122 },
3123
3124 {
3125 .name = "sched_relax_domain_level",
3126 .read_s64 = cpuset_read_s64,
3127 .write_s64 = cpuset_write_s64,
3128 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3129 },
3130
3131 {
3132 .name = "memory_migrate",
3133 .read_u64 = cpuset_read_u64,
3134 .write_u64 = cpuset_write_u64,
3135 .private = FILE_MEMORY_MIGRATE,
3136 },
3137
3138 {
3139 .name = "memory_pressure",
3140 .read_u64 = cpuset_read_u64,
3141 .private = FILE_MEMORY_PRESSURE,
3142 },
3143
3144 {
3145 .name = "memory_spread_page",
3146 .read_u64 = cpuset_read_u64,
3147 .write_u64 = cpuset_write_u64,
3148 .private = FILE_SPREAD_PAGE,
3149 },
3150
3151 {
3152 .name = "memory_spread_slab",
3153 .read_u64 = cpuset_read_u64,
3154 .write_u64 = cpuset_write_u64,
3155 .private = FILE_SPREAD_SLAB,
3156 },
3157
3158 {
3159 .name = "memory_pressure_enabled",
3160 .flags = CFTYPE_ONLY_ON_ROOT,
3161 .read_u64 = cpuset_read_u64,
3162 .write_u64 = cpuset_write_u64,
3163 .private = FILE_MEMORY_PRESSURE_ENABLED,
3164 },
3165
3166 { } /* terminate */
3167 };
3168
3169 /*
3170 * This is currently a minimal set for the default hierarchy. It can be
3171 * expanded later on by migrating more features and control files from v1.
3172 */
3173 static struct cftype dfl_files[] = {
3174 {
3175 .name = "cpus",
3176 .seq_show = cpuset_common_seq_show,
3177 .write = cpuset_write_resmask,
3178 .max_write_len = (100U + 6 * NR_CPUS),
3179 .private = FILE_CPULIST,
3180 .flags = CFTYPE_NOT_ON_ROOT,
3181 },
3182
3183 {
3184 .name = "mems",
3185 .seq_show = cpuset_common_seq_show,
3186 .write = cpuset_write_resmask,
3187 .max_write_len = (100U + 6 * MAX_NUMNODES),
3188 .private = FILE_MEMLIST,
3189 .flags = CFTYPE_NOT_ON_ROOT,
3190 },
3191
3192 {
3193 .name = "cpus.effective",
3194 .seq_show = cpuset_common_seq_show,
3195 .private = FILE_EFFECTIVE_CPULIST,
3196 },
3197
3198 {
3199 .name = "mems.effective",
3200 .seq_show = cpuset_common_seq_show,
3201 .private = FILE_EFFECTIVE_MEMLIST,
3202 },
3203
3204 {
3205 .name = "cpus.partition",
3206 .seq_show = sched_partition_show,
3207 .write = sched_partition_write,
3208 .private = FILE_PARTITION_ROOT,
3209 .flags = CFTYPE_NOT_ON_ROOT,
3210 .file_offset = offsetof(struct cpuset, partition_file),
3211 },
3212
3213 {
3214 .name = "cpus.subpartitions",
3215 .seq_show = cpuset_common_seq_show,
3216 .private = FILE_SUBPARTS_CPULIST,
3217 .flags = CFTYPE_DEBUG,
3218 },
3219
3220 { } /* terminate */
3221 };
3222
3223
3224 /**
3225 * cpuset_css_alloc - Allocate a cpuset css
3226 * @parent_css: Parent css of the control group that the new cpuset will be
3227 * part of
3228 * Return: cpuset css on success, -ENOMEM on failure.
3229 *
3230 * Allocate and initialize a new cpuset css for non-NULL @parent_css;
3231 * return the top cpuset css otherwise.
3232 */
3233 static struct cgroup_subsys_state *
3234 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3235 {
3236 struct cpuset *cs;
3237
3238 if (!parent_css)
3239 return &top_cpuset.css;
3240
3241 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3242 if (!cs)
3243 return ERR_PTR(-ENOMEM);
3244
3245 if (alloc_cpumasks(cs, NULL)) {
3246 kfree(cs);
3247 return ERR_PTR(-ENOMEM);
3248 }
3249
3250 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3251 nodes_clear(cs->mems_allowed);
3252 nodes_clear(cs->effective_mems);
3253 fmeter_init(&cs->fmeter);
3254 cs->relax_domain_level = -1;
3255
3256 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3257 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3258 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3259
3260 return &cs->css;
3261 }
3262
3263 static int cpuset_css_online(struct cgroup_subsys_state *css)
3264 {
3265 struct cpuset *cs = css_cs(css);
3266 struct cpuset *parent = parent_cs(cs);
3267 struct cpuset *tmp_cs;
3268 struct cgroup_subsys_state *pos_css;
3269
3270 if (!parent)
3271 return 0;
3272
3273 cpus_read_lock();
3274 mutex_lock(&cpuset_mutex);
3275
3276 set_bit(CS_ONLINE, &cs->flags);
3277 if (is_spread_page(parent))
3278 set_bit(CS_SPREAD_PAGE, &cs->flags);
3279 if (is_spread_slab(parent))
3280 set_bit(CS_SPREAD_SLAB, &cs->flags);
3281
3282 cpuset_inc();
3283
3284 spin_lock_irq(&callback_lock);
3285 if (is_in_v2_mode()) {
3286 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3287 cs->effective_mems = parent->effective_mems;
3288 cs->use_parent_ecpus = true;
3289 parent->child_ecpus_count++;
3290 }
3291
3292 /*
3293 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3294 */
3295 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3296 !is_sched_load_balance(parent))
3297 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3298
3299 spin_unlock_irq(&callback_lock);
3300
3301 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3302 goto out_unlock;
3303
3304 /*
3305 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3306 * set. This flag handling is implemented in cgroup core for
3307 * historical reasons - the flag may be specified during mount.
3308 *
3309 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3310 * refuse to clone the configuration - thereby refusing the task to
3311 * be entered, and as a result refusing the sys_unshare() or
3312 * clone() which initiated it. If this becomes a problem for some
3313 * users who wish to allow that scenario, then this could be
3314 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3315 * (and likewise for mems) to the new cgroup.
3316 */
3317 rcu_read_lock();
3318 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3319 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3320 rcu_read_unlock();
3321 goto out_unlock;
3322 }
3323 }
3324 rcu_read_unlock();
3325
3326 spin_lock_irq(&callback_lock);
3327 cs->mems_allowed = parent->mems_allowed;
3328 cs->effective_mems = parent->mems_allowed;
3329 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3330 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3331 spin_unlock_irq(&callback_lock);
3332 out_unlock:
3333 mutex_unlock(&cpuset_mutex);
3334 cpus_read_unlock();
3335 return 0;
3336 }
3337
3338 /*
3339 * If the cpuset being removed has its flag 'sched_load_balance'
3340 * enabled, then simulate turning sched_load_balance off, which
3341 * will call rebuild_sched_domains_locked(). That is not needed
3342 * in the default hierarchy where only changes in partition
3343 * will cause repartitioning.
3344 *
3345 * If the cpuset has the 'sched.partition' flag enabled, simulate
3346 * turning 'sched.partition" off.
3347 */
3348
3349 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3350 {
3351 struct cpuset *cs = css_cs(css);
3352
3353 cpus_read_lock();
3354 mutex_lock(&cpuset_mutex);
3355
3356 if (is_partition_valid(cs))
3357 update_prstate(cs, 0);
3358
3359 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3360 is_sched_load_balance(cs))
3361 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3362
3363 if (cs->use_parent_ecpus) {
3364 struct cpuset *parent = parent_cs(cs);
3365
3366 cs->use_parent_ecpus = false;
3367 parent->child_ecpus_count--;
3368 }
3369
3370 cpuset_dec();
3371 clear_bit(CS_ONLINE, &cs->flags);
3372
3373 mutex_unlock(&cpuset_mutex);
3374 cpus_read_unlock();
3375 }
3376
3377 static void cpuset_css_free(struct cgroup_subsys_state *css)
3378 {
3379 struct cpuset *cs = css_cs(css);
3380
3381 free_cpuset(cs);
3382 }
3383
3384 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3385 {
3386 mutex_lock(&cpuset_mutex);
3387 spin_lock_irq(&callback_lock);
3388
3389 if (is_in_v2_mode()) {
3390 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3391 top_cpuset.mems_allowed = node_possible_map;
3392 } else {
3393 cpumask_copy(top_cpuset.cpus_allowed,
3394 top_cpuset.effective_cpus);
3395 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3396 }
3397
3398 spin_unlock_irq(&callback_lock);
3399 mutex_unlock(&cpuset_mutex);
3400 }
3401
3402 /*
3403 * In case the child is cloned into a cpuset different from its parent,
3404 * additional checks are done to see if the move is allowed.
3405 */
3406 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3407 {
3408 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3409 bool same_cs;
3410 int ret;
3411
3412 rcu_read_lock();
3413 same_cs = (cs == task_cs(current));
3414 rcu_read_unlock();
3415
3416 if (same_cs)
3417 return 0;
3418
3419 lockdep_assert_held(&cgroup_mutex);
3420 mutex_lock(&cpuset_mutex);
3421
3422 /* Check to see if task is allowed in the cpuset */
3423 ret = cpuset_can_attach_check(cs);
3424 if (ret)
3425 goto out_unlock;
3426
3427 ret = task_can_attach(task);
3428 if (ret)
3429 goto out_unlock;
3430
3431 ret = security_task_setscheduler(task);
3432 if (ret)
3433 goto out_unlock;
3434
3435 /*
3436 * Mark that attach is in progress. This makes validate_change() fail
3437 * changes which zero cpus/mems_allowed.
3438 */
3439 cs->attach_in_progress++;
3440 out_unlock:
3441 mutex_unlock(&cpuset_mutex);
3442 return ret;
3443 }
3444
3445 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3446 {
3447 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3448 bool same_cs;
3449
3450 rcu_read_lock();
3451 same_cs = (cs == task_cs(current));
3452 rcu_read_unlock();
3453
3454 if (same_cs)
3455 return;
3456
3457 mutex_lock(&cpuset_mutex);
3458 cs->attach_in_progress--;
3459 if (!cs->attach_in_progress)
3460 wake_up(&cpuset_attach_wq);
3461 mutex_unlock(&cpuset_mutex);
3462 }
3463
3464 /*
3465 * Make sure the new task conforms to the current state of its parent,
3466 * which could have been changed by cpuset just after it inherits the
3467 * state from the parent and before it sits on the cgroup's task list.
3468 */
3469 static void cpuset_fork(struct task_struct *task)
3470 {
3471 struct cpuset *cs;
3472 bool same_cs;
3473
3474 rcu_read_lock();
3475 cs = task_cs(task);
3476 same_cs = (cs == task_cs(current));
3477 rcu_read_unlock();
3478
3479 if (same_cs) {
3480 if (cs == &top_cpuset)
3481 return;
3482
3483 set_cpus_allowed_ptr(task, current->cpus_ptr);
3484 task->mems_allowed = current->mems_allowed;
3485 return;
3486 }
3487
3488 /* CLONE_INTO_CGROUP */
3489 mutex_lock(&cpuset_mutex);
3490 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3491 cpuset_attach_task(cs, task);
3492
3493 cs->attach_in_progress--;
3494 if (!cs->attach_in_progress)
3495 wake_up(&cpuset_attach_wq);
3496
3497 mutex_unlock(&cpuset_mutex);
3498 }
3499
3500 struct cgroup_subsys cpuset_cgrp_subsys = {
3501 .css_alloc = cpuset_css_alloc,
3502 .css_online = cpuset_css_online,
3503 .css_offline = cpuset_css_offline,
3504 .css_free = cpuset_css_free,
3505 .can_attach = cpuset_can_attach,
3506 .cancel_attach = cpuset_cancel_attach,
3507 .attach = cpuset_attach,
3508 .post_attach = cpuset_post_attach,
3509 .bind = cpuset_bind,
3510 .can_fork = cpuset_can_fork,
3511 .cancel_fork = cpuset_cancel_fork,
3512 .fork = cpuset_fork,
3513 .legacy_cftypes = legacy_files,
3514 .dfl_cftypes = dfl_files,
3515 .early_init = true,
3516 .threaded = true,
3517 };
3518
3519 /**
3520 * cpuset_init - initialize cpusets at system boot
3521 *
3522 * Description: Initialize top_cpuset
3523 **/
3524
3525 int __init cpuset_init(void)
3526 {
3527 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3528 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3529 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3530
3531 cpumask_setall(top_cpuset.cpus_allowed);
3532 nodes_setall(top_cpuset.mems_allowed);
3533 cpumask_setall(top_cpuset.effective_cpus);
3534 nodes_setall(top_cpuset.effective_mems);
3535
3536 fmeter_init(&top_cpuset.fmeter);
3537 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3538 top_cpuset.relax_domain_level = -1;
3539
3540 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3541
3542 return 0;
3543 }
3544
3545 /*
3546 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3547 * or memory nodes, we need to walk over the cpuset hierarchy,
3548 * removing that CPU or node from all cpusets. If this removes the
3549 * last CPU or node from a cpuset, then move the tasks in the empty
3550 * cpuset to its next-highest non-empty parent.
3551 */
3552 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3553 {
3554 struct cpuset *parent;
3555
3556 /*
3557 * Find its next-highest non-empty parent (top cpuset
3558 * has online cpus, so can't be empty).
3559 */
3560 parent = parent_cs(cs);
3561 while (cpumask_empty(parent->cpus_allowed) ||
3562 nodes_empty(parent->mems_allowed))
3563 parent = parent_cs(parent);
3564
3565 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3566 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3567 pr_cont_cgroup_name(cs->css.cgroup);
3568 pr_cont("\n");
3569 }
3570 }
3571
3572 static void
3573 hotplug_update_tasks_legacy(struct cpuset *cs,
3574 struct cpumask *new_cpus, nodemask_t *new_mems,
3575 bool cpus_updated, bool mems_updated)
3576 {
3577 bool is_empty;
3578
3579 spin_lock_irq(&callback_lock);
3580 cpumask_copy(cs->cpus_allowed, new_cpus);
3581 cpumask_copy(cs->effective_cpus, new_cpus);
3582 cs->mems_allowed = *new_mems;
3583 cs->effective_mems = *new_mems;
3584 spin_unlock_irq(&callback_lock);
3585
3586 /*
3587 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3588 * as the tasks will be migrated to an ancestor.
3589 */
3590 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3591 update_tasks_cpumask(cs, new_cpus);
3592 if (mems_updated && !nodes_empty(cs->mems_allowed))
3593 update_tasks_nodemask(cs);
3594
3595 is_empty = cpumask_empty(cs->cpus_allowed) ||
3596 nodes_empty(cs->mems_allowed);
3597
3598 /*
3599 * Move tasks to the nearest ancestor with execution resources.
3600 * This is a full cgroup operation which will also call back into
3601 * cpuset. Should be done outside any lock.
3602 */
3603 if (is_empty) {
3604 mutex_unlock(&cpuset_mutex);
3605 remove_tasks_in_empty_cpuset(cs);
3606 mutex_lock(&cpuset_mutex);
3607 }
3608 }
3609
3610 static void
3611 hotplug_update_tasks(struct cpuset *cs,
3612 struct cpumask *new_cpus, nodemask_t *new_mems,
3613 bool cpus_updated, bool mems_updated)
3614 {
3615 /* A partition root is allowed to have empty effective cpus */
3616 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3617 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3618 if (nodes_empty(*new_mems))
3619 *new_mems = parent_cs(cs)->effective_mems;
3620
3621 spin_lock_irq(&callback_lock);
3622 cpumask_copy(cs->effective_cpus, new_cpus);
3623 cs->effective_mems = *new_mems;
3624 spin_unlock_irq(&callback_lock);
3625
3626 if (cpus_updated)
3627 update_tasks_cpumask(cs, new_cpus);
3628 if (mems_updated)
3629 update_tasks_nodemask(cs);
3630 }
3631
3632 static bool force_rebuild;
3633
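/*
 * Ask cpuset_hotplug_workfn() to rebuild the sched domains on its next
 * run even if it detects no CPU change.
 */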
3634 void cpuset_force_rebuild(void)
3635 {
3636 force_rebuild = true;
3637 }
3638
3639 /**
3640 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3641 * @cs: cpuset in interest
3642 * @tmp: the tmpmasks structure pointer
3643 *
3644 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3645 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3646 * all its tasks are moved to the nearest ancestor with both resources.
3647 */
3648 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3649 {
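/*
 * new_cpus/new_mems are static to keep the large masks off the stack;
 * this is safe because all calls are serialized through the single
 * cpuset hotplug work item.
 */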
3650 static cpumask_t new_cpus;
3651 static nodemask_t new_mems;
3652 bool cpus_updated;
3653 bool mems_updated;
3654 struct cpuset *parent;
3655 retry:
3656 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3657
3658 mutex_lock(&cpuset_mutex);
3659
3660 /*
3661 * We have raced with task attaching. We wait until attaching
3662 * is finished, so we won't attach a task to an empty cpuset.
3663 */
3664 if (cs->attach_in_progress) {
3665 mutex_unlock(&cpuset_mutex);
3666 goto retry;
3667 }
3668
3669 parent = parent_cs(cs);
3670 compute_effective_cpumask(&new_cpus, cs, parent);
3671 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3672
3673 if (cs->nr_subparts_cpus)
3674 /*
3675 * Make sure that CPUs allocated to child partitions
3676 * do not show up in effective_cpus.
3677 */
3678 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3679
3680 if (!tmp || !cs->partition_root_state)
3681 goto update_tasks;
3682
3683 /*
3684 * In the unlikely event that a partition root has empty
3685 * effective_cpus with tasks, we will have to invalidate child
3686 * partitions, if present, by setting nr_subparts_cpus to 0 to
3687 * reclaim their cpus.
3688 */
3689 if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3690 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3691 spin_lock_irq(&callback_lock);
3692 cs->nr_subparts_cpus = 0;
3693 cpumask_clear(cs->subparts_cpus);
3694 spin_unlock_irq(&callback_lock);
3695 compute_effective_cpumask(&new_cpus, cs, parent);
3696 }
3697
3698 /*
3699 * Force the partition to become invalid if either one of
3700 * the following conditions hold:
3701 * 1) empty effective cpus but not valid empty partition.
3702 * 2) parent is invalid or doesn't grant any cpus to child
3703 * partitions.
3704 */
3705 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3706 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3707 int old_prs, parent_prs;
3708
3709 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3710 if (cs->nr_subparts_cpus) {
3711 spin_lock_irq(&callback_lock);
3712 cs->nr_subparts_cpus = 0;
3713 cpumask_clear(cs->subparts_cpus);
3714 spin_unlock_irq(&callback_lock);
3715 compute_effective_cpumask(&new_cpus, cs, parent);
3716 }
3717
3718 old_prs = cs->partition_root_state;
3719 parent_prs = parent->partition_root_state;
3720 if (is_partition_valid(cs)) {
3721 spin_lock_irq(&callback_lock);
3722 make_partition_invalid(cs);
3723 spin_unlock_irq(&callback_lock);
3724 if (is_prs_invalid(parent_prs))
3725 WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3726 else if (!parent_prs)
3727 WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3728 else
3729 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3730 notify_partition_change(cs, old_prs);
3731 }
3732 cpuset_force_rebuild();
3733 }
3734
3735 /*
3736 * On the other hand, an invalid partition root may be transitioned
3737 * back to a regular one.
3738 */
3739 else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3740 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3741 if (is_partition_valid(cs))
3742 cpuset_force_rebuild();
3743 }
3744
3745 update_tasks:
3746 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3747 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3748 if (!cpus_updated && !mems_updated)
3749 goto unlock; /* Hotplug doesn't affect this cpuset */
3750
3751 if (mems_updated)
3752 check_insane_mems_config(&new_mems);
3753
3754 if (is_in_v2_mode())
3755 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3756 cpus_updated, mems_updated);
3757 else
3758 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3759 cpus_updated, mems_updated);
3760
3761 unlock:
3762 mutex_unlock(&cpuset_mutex);
3763 }
3764
3765 /**
3766 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3767 * @work: unused
3768 *
3769 * This function is called after either CPU or memory configuration has
3770 * changed and updates cpuset accordingly. The top_cpuset is always
3771 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3772 * order to make cpusets transparent (of no effect) on systems that are
3773 * actively using CPU hotplug but making no active use of cpusets.
3774 *
3775 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3776 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3777 * all descendants.
3778 *
3779 * Note that CPU offlining during suspend is ignored. We don't modify
3780 * cpusets across suspend/resume cycles at all.
3781 */
3782 static void cpuset_hotplug_workfn(struct work_struct *work)
3783 {
3784 static cpumask_t new_cpus;
3785 static nodemask_t new_mems;
3786 bool cpus_updated, mems_updated;
3787 bool on_dfl = is_in_v2_mode();
3788 struct tmpmasks tmp, *ptmp = NULL;
3789
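/*
 * tmpmasks are only needed for partition handling, which exists only
 * on the default hierarchy (v2).
 */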
3790 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3791 ptmp = &tmp;
3792
3793 mutex_lock(&cpuset_mutex);
3794
3795 /* fetch the available cpus/mems and find out which changed how */
3796 cpumask_copy(&new_cpus, cpu_active_mask);
3797 new_mems = node_states[N_MEMORY];
3798
3799 /*
3800 * If subparts_cpus is populated, it is likely that the check below
3801 * will produce a false positive on cpus_updated when the cpu list
3802 * isn't changed. It is extra work, but it is better to be safe.
3803 */
3804 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3805 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3806
3807 /*
3808 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3809 * we assume that cpus are updated.
3810 */
3811 if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3812 cpus_updated = true;
3813
3814 /* synchronize cpus_allowed to cpu_active_mask */
3815 if (cpus_updated) {
3816 spin_lock_irq(&callback_lock);
3817 if (!on_dfl)
3818 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3819 /*
3820 * Make sure that CPUs allocated to child partitions
3821 * do not show up in effective_cpus. If no CPU is left,
3822 * we clear the subparts_cpus & let the child partitions
3823 * fight for the CPUs again.
3824 */
3825 if (top_cpuset.nr_subparts_cpus) {
3826 if (cpumask_subset(&new_cpus,
3827 top_cpuset.subparts_cpus)) {
3828 top_cpuset.nr_subparts_cpus = 0;
3829 cpumask_clear(top_cpuset.subparts_cpus);
3830 } else {
3831 cpumask_andnot(&new_cpus, &new_cpus,
3832 top_cpuset.subparts_cpus);
3833 }
3834 }
3835 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3836 spin_unlock_irq(&callback_lock);
3837 /* we don't mess with cpumasks of tasks in top_cpuset */
3838 }
3839
3840 /* synchronize mems_allowed to N_MEMORY */
3841 if (mems_updated) {
3842 spin_lock_irq(&callback_lock);
3843 if (!on_dfl)
3844 top_cpuset.mems_allowed = new_mems;
3845 top_cpuset.effective_mems = new_mems;
3846 spin_unlock_irq(&callback_lock);
3847 update_tasks_nodemask(&top_cpuset);
3848 }
3849
3850 mutex_unlock(&cpuset_mutex);
3851
3852 /* if cpus or mems changed, we need to propagate to descendants */
3853 if (cpus_updated || mems_updated) {
3854 struct cpuset *cs;
3855 struct cgroup_subsys_state *pos_css;
3856
3857 rcu_read_lock();
3858 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3859 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3860 continue;
3861 rcu_read_unlock();
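/*
 * The css reference taken above allows dropping the RCU lock here;
 * cpuset_hotplug_update_tasks() may sleep.
 */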
3862
3863 cpuset_hotplug_update_tasks(cs, ptmp);
3864
3865 rcu_read_lock();
3866 css_put(&cs->css);
3867 }
3868 rcu_read_unlock();
3869 }
3870
3871 /* rebuild sched domains if cpus_allowed has changed */
3872 if (cpus_updated || force_rebuild) {
3873 force_rebuild = false;
3874 rebuild_sched_domains();
3875 }
3876
3877 free_cpumasks(NULL, ptmp);
3878 }
3879
3880 void cpuset_update_active_cpus(void)
3881 {
3882 /*
3883 * We're inside cpu hotplug critical region which usually nests
3884 * inside cgroup synchronization. Bounce actual hotplug processing
3885 * to a work item to avoid reverse locking order.
3886 */
3887 schedule_work(&cpuset_hotplug_work);
3888 }
3889
3890 void cpuset_wait_for_hotplug(void)
3891 {
3892 flush_work(&cpuset_hotplug_work);
3893 }
3894
3895 /*
3896 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3897 * Call this routine anytime after node_states[N_MEMORY] changes.
3898 * See cpuset_update_active_cpus() for CPU hotplug handling.
3899 */
3900 static int cpuset_track_online_nodes(struct notifier_block *self,
3901 unsigned long action, void *arg)
3902 {
3903 schedule_work(&cpuset_hotplug_work);
3904 return NOTIFY_OK;
3905 }
3906
3907 /**
3908 * cpuset_init_smp - initialize cpus_allowed
3909 *
3910 * Description: Finish top cpuset after cpu, node maps are initialized
3911 */
3912 void __init cpuset_init_smp(void)
3913 {
3914 /*
3915 * cpus_allowed/mems_allowed set to v2 values in the initial
3916 * cpuset_bind() call will be reset to v1 values in another
3917 * cpuset_bind() call when v1 cpuset is mounted.
3918 */
3919 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3920
3921 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3922 top_cpuset.effective_mems = node_states[N_MEMORY];
3923
3924 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3925
3926 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3927 BUG_ON(!cpuset_migrate_mm_wq);
3928 }
3929
3930 /**
3931 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3932 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3933 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3934 *
3935 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3936 * attached to the specified @tsk. Guaranteed to return some non-empty
3937 * subset of cpu_online_mask, even if this means going outside the
3938 * task's cpuset, except when the task is in the top cpuset.
3939 **/
3940
3941 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3942 {
3943 unsigned long flags;
3944 struct cpuset *cs;
3945
3946 spin_lock_irqsave(&callback_lock, flags);
3947 rcu_read_lock();
3948
3949 cs = task_cs(tsk);
3950 if (cs != &top_cpuset)
3951 guarantee_online_cpus(tsk, pmask);
3952 /*
3953 * Tasks in the top cpuset won't get updates to their cpumasks
3954 * when a hotplug online/offline event happens. So we include all
3955 * offline cpus in the allowed cpu list.
3956 */
3957 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3958 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3959
3960 /*
3961 * We first exclude cpus allocated to partitions. If there is no
3962 * allowable online cpu left, we fall back to all possible cpus.
3963 */
3964 cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
3965 if (!cpumask_intersects(pmask, cpu_online_mask))
3966 cpumask_copy(pmask, possible_mask);
3967 }
3968
3969 rcu_read_unlock();
3970 spin_unlock_irqrestore(&callback_lock, flags);
3971 }
3972
3973 /**
3974 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3975 * @tsk: pointer to task_struct with which the scheduler is struggling
3976 *
3977 * Description: In the case that the scheduler cannot find an allowed cpu in
3978 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3979 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3980 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3981 * This is the absolute last resort for the scheduler and it is only used if
3982 * _every_ other avenue has been traveled.
3983 *
3984 * Returns true if the affinity of @tsk was changed, false otherwise.
3985 **/
3986
3987 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3988 {
3989 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3990 const struct cpumask *cs_mask;
3991 bool changed = false;
3992
3993 rcu_read_lock();
3994 cs_mask = task_cs(tsk)->cpus_allowed;
3995 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
3996 do_set_cpus_allowed(tsk, cs_mask);
3997 changed = true;
3998 }
3999 rcu_read_unlock();
4000
4001 /*
4002 * We own tsk->cpus_allowed, nobody can change it under us.
4003 *
4004 * But we used cs && cs->cpus_allowed lockless and thus can
4005 * race with cgroup_attach_task() or update_cpumask() and get
4006 * the wrong tsk->cpus_allowed. However, both cases imply the
4007 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4008 * which takes task_rq_lock().
4009 *
4010 * If we are called after it dropped the lock we must see all
4011 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4012 * set any mask even if it is not right from task_cs() pov,
4013 * the pending set_cpus_allowed_ptr() will fix things.
4014 *
4015 * select_fallback_rq() will fix things up and set cpu_possible_mask
4016 * if required.
4017 */
4018 return changed;
4019 }
4020
4021 void __init cpuset_init_current_mems_allowed(void)
4022 {
4023 nodes_setall(current->mems_allowed);
4024 }
4025
4026 /**
4027 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4028 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4029 *
4030 * Description: Returns the nodemask_t mems_allowed of the cpuset
4031 * attached to the specified @tsk. Guaranteed to return some non-empty
4032 * subset of node_states[N_MEMORY], even if this means going outside the
4033 * task's cpuset.
4034 **/
4035
4036 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4037 {
4038 nodemask_t mask;
4039 unsigned long flags;
4040
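/*
 * callback_lock stabilizes the cpuset's mems while RCU keeps
 * task_cs(tsk)'s css from going away under us.
 */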
4041 spin_lock_irqsave(&callback_lock, flags);
4042 rcu_read_lock();
4043 guarantee_online_mems(task_cs(tsk), &mask);
4044 rcu_read_unlock();
4045 spin_unlock_irqrestore(&callback_lock, flags);
4046
4047 return mask;
4048 }
4049
4050 /**
4051 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4052 * @nodemask: the nodemask to be checked
4053 *
4054 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4055 */
4056 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4057 {
4058 return nodes_intersects(*nodemask, current->mems_allowed);
4059 }
4060
4061 /*
4062 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4063 * mem_hardwall ancestor to the specified cpuset. Call holding
4064 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4065 * (an unusual configuration), then returns the root cpuset.
4066 */
4067 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4068 {
4069 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4070 cs = parent_cs(cs);
4071 return cs;
4072 }
4073
4074 /*
4075 * cpuset_node_allowed - Can we allocate on a memory node?
4076 * @node: is this an allowed node?
4077 * @gfp_mask: memory allocation flags
4078 *
4079 * If we're in interrupt, yes, we can always allocate. If @node is set in
4080 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4081 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4082 * yes. If current has access to memory reserves as an oom victim, yes.
4083 * Otherwise, no.
4084 *
4085 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4086 * and do not allow allocations outside the current task's cpuset
4087 * unless the task has been OOM killed.
4088 * GFP_KERNEL allocations are not so marked, so can escape to the
4089 * nearest enclosing hardwalled ancestor cpuset.
4090 *
4091 * Scanning up parent cpusets requires callback_lock. The
4092 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4093 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4094 * current task's mems_allowed came up empty on the first pass over
4095 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4096 * cpuset are short of memory, might require taking the callback_lock.
4097 *
4098 * The first call here from mm/page_alloc:get_page_from_freelist()
4099 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4100 * so no allocation on a node outside the cpuset is allowed (unless
4101 * in interrupt, of course).
4102 *
4103 * The second pass through get_page_from_freelist() doesn't even call
4104 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4105 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4106 * in alloc_flags. That logic and the checks below have the combined
4107 * effect that:
4108 * in_interrupt - any node ok (current task context irrelevant)
4109 * GFP_ATOMIC - any node ok
4110 * tsk_is_oom_victim - any node ok
4111 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4112 * GFP_USER - only nodes in the current task's mems_allowed ok.
4113 */
4114 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4115 {
4116 struct cpuset *cs; /* current cpuset ancestors */
4117 bool allowed; /* is allocation in zone z allowed? */
4118 unsigned long flags;
4119
4120 if (in_interrupt())
4121 return true;
4122 if (node_isset(node, current->mems_allowed))
4123 return true;
4124 /*
4125 * Allow tasks that have access to memory reserves because they have
4126 * been OOM killed to get memory anywhere.
4127 */
4128 if (unlikely(tsk_is_oom_victim(current)))
4129 return true;
4130 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4131 return false;
4132
4133 if (current->flags & PF_EXITING) /* Let dying task have memory */
4134 return true;
4135
4136 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4137 spin_lock_irqsave(&callback_lock, flags);
4138
4139 rcu_read_lock();
4140 cs = nearest_hardwall_ancestor(task_cs(current));
4141 allowed = node_isset(node, cs->mems_allowed);
4142 rcu_read_unlock();
4143
4144 spin_unlock_irqrestore(&callback_lock, flags);
4145 return allowed;
4146 }
4147
4148 /**
4149 * cpuset_spread_node() - On which node to begin search for a page
4150 * @rotor: round robin rotor
4151 *
4152 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4153 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4154 * and if the memory allocation used cpuset_mem_spread_node()
4155 * to determine on which node to start looking, as it will for
4156 * certain page cache or slab cache pages such as used for file
4157 * system buffers and inode caches, then instead of starting on the
4158 * local node to look for a free page, rather spread the starting
4159 * node around the task's mems_allowed nodes.
4160 *
4161 * We don't have to worry about the returned node being offline
4162 * because "it can't happen", and even if it did, it would be ok.
4163 *
4164 * The routines calling guarantee_online_mems() are careful to
4165 * only set nodes in task->mems_allowed that are online. So it
4166 * should not be possible for the following code to return an
4167 * offline node. But if it did, that would be ok, as this routine
4168 * is not returning the node where the allocation must be, only
4169 * the node where the search should start. The zonelist passed to
4170 * __alloc_pages() will include all nodes. If the slab allocator
4171 * is passed an offline node, it will fall back to the local node.
4172 * See kmem_cache_alloc_node().
4173 */
4174 static int cpuset_spread_node(int *rotor)
4175 {
4176 return *rotor = next_node_in(*rotor, current->mems_allowed);
4177 }
4178
4179 /**
4180 * cpuset_mem_spread_node() - On which node to begin search for a file page
4181 */
4182 int cpuset_mem_spread_node(void)
4183 {
4184 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4185 current->cpuset_mem_spread_rotor =
4186 node_random(&current->mems_allowed);
4187
4188 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4189 }
4190
4191 /**
4192 * cpuset_slab_spread_node() - On which node to begin search for a slab page
4193 */
4194 int cpuset_slab_spread_node(void)
4195 {
4196 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
4197 current->cpuset_slab_spread_rotor =
4198 node_random(&current->mems_allowed);
4199
4200 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
4201 }
4202 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
4203
4204 /**
4205 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4206 * @tsk1: pointer to task_struct of some task.
4207 * @tsk2: pointer to task_struct of some other task.
4208 *
4209 * Description: Return true if @tsk1's mems_allowed intersects the
4210 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4211 * either task's memory usage might impact the memory available
4212 * to the other.
4213 **/
4214
4215 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4216 const struct task_struct *tsk2)
4217 {
4218 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4219 }
4220
4221 /**
4222 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4223 *
4224 * Description: Prints current's name, cpuset name, and cached copy of its
4225 * mems_allowed to the kernel log.
4226 */
4227 void cpuset_print_current_mems_allowed(void)
4228 {
4229 struct cgroup *cgrp;
4230
4231 rcu_read_lock();
4232
4233 cgrp = task_cs(current)->css.cgroup;
4234 pr_cont(",cpuset=");
4235 pr_cont_cgroup_name(cgrp);
4236 pr_cont(",mems_allowed=%*pbl",
4237 nodemask_pr_args(&current->mems_allowed));
4238
4239 rcu_read_unlock();
4240 }
4241
4242 /*
4243 * Collection of memory_pressure is suppressed unless
4244 * this flag is enabled by writing "1" to the special
4245 * cpuset file 'memory_pressure_enabled' in the root cpuset.
4246 */
4247
4248 int cpuset_memory_pressure_enabled __read_mostly;
4249
4250 /*
4251 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4252 *
4253 * Keep a running average of the rate of synchronous (direct)
4254 * page reclaim efforts initiated by tasks in each cpuset.
4255 *
4256 * This represents the rate at which some task in the cpuset
4257 * ran low on memory on all nodes it was allowed to use, and
4258 * had to enter the kernel's page reclaim code in an effort to
4259 * create more free memory by tossing clean pages or swapping
4260 * or writing dirty pages.
4261 *
4262 * Display to user space in the per-cpuset read-only file
4263 * "memory_pressure". Value displayed is an integer
4264 * representing the recent rate of entry into the synchronous
4265 * (direct) page reclaim by any task attached to the cpuset.
4266 */
4267
4268 void __cpuset_memory_pressure_bump(void)
4269 {
4270 rcu_read_lock();
4271 fmeter_markevent(&task_cs(current)->fmeter);
4272 rcu_read_unlock();
4273 }
4274
4275 #ifdef CONFIG_PROC_PID_CPUSET
4276 /*
4277 * proc_cpuset_show()
4278 * - Print task's cpuset path into seq_file.
4279 * - Used for /proc/<pid>/cpuset.
4280 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4281 * doesn't really matter if tsk->cpuset changes after we read it,
4282 * and we take css_set_lock, keeping cgroup migration from changing it
4283 * anyway.
4284 */
4285 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4286 struct pid *pid, struct task_struct *tsk)
4287 {
4288 char *buf;
4289 struct cgroup_subsys_state *css;
4290 int retval;
4291
4292 retval = -ENOMEM;
4293 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4294 if (!buf)
4295 goto out;
4296
4297 rcu_read_lock();
4298 spin_lock_irq(&css_set_lock);
4299 css = task_css(tsk, cpuset_cgrp_id);
4300 retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
4301 current->nsproxy->cgroup_ns);
4302 spin_unlock_irq(&css_set_lock);
4303 rcu_read_unlock();
4304
4305 if (retval == -E2BIG)
4306 retval = -ENAMETOOLONG;
4307 if (retval < 0)
4308 goto out_free;
4309 seq_puts(m, buf);
4310 seq_putc(m, '\n');
4311 retval = 0;
4312 out_free:
4313 kfree(buf);
4314 out:
4315 return retval;
4316 }
4317 #endif /* CONFIG_PROC_PID_CPUSET */
4318
4319 /* Display task mems_allowed in /proc/<pid>/status file. */
4320 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4321 {
4322 seq_printf(m, "Mems_allowed:\t%*pb\n",
4323 nodemask_pr_args(&task->mems_allowed));
4324 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4325 nodemask_pr_args(&task->mems_allowed));
4326 }
4327