1 /*
2  *  kernel/cpuset.c
3  *
4  *  Processor and Memory placement constraints for sets of tasks.
5  *
6  *  Copyright (C) 2003 BULL SA.
7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
8  *  Copyright (C) 2006 Google, Inc
9  *
10  *  Portions derived from Patrick Mochel's sysfs code.
11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
12  *
13  *  2003-10-10 Written by Simon Derr.
14  *  2003-10-22 Updates by Stephen Hemminger.
15  *  2004 May-July Rework by Paul Jackson.
16  *  2006 Rework by Paul Menage to use generic cgroups
17  *  2008 Rework of the scheduler domains and CPU hotplug handling
18  *       by Max Krasnyansky
19  *
20  *  This file is subject to the terms and conditions of the GNU General Public
21  *  License.  See the file COPYING in the main directory of the Linux
22  *  distribution for more details.
23  */
24 
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/mempolicy.h>
32 #include <linux/mm.h>
33 #include <linux/memory.h>
34 #include <linux/export.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched.h>
37 #include <linux/sched/deadline.h>
38 #include <linux/sched/mm.h>
39 #include <linux/sched/task.h>
40 #include <linux/security.h>
41 #include <linux/spinlock.h>
42 #include <linux/oom.h>
43 #include <linux/sched/isolation.h>
44 #include <linux/cgroup.h>
45 #include <linux/wait.h>
46 
47 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
48 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
49 
50 /*
51  * There could be abnormal cpuset configurations for cpu or memory
52  * node binding; this key provides a quick, low-cost check for such
53  * situations.
54  */
55 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
56 
57 /* See "Frequency meter" comments, below. */
58 
59 struct fmeter {
60 	int cnt;		/* unprocessed events count */
61 	int val;		/* most recent output value */
62 	time64_t time;		/* clock (secs) when val computed */
63 	spinlock_t lock;	/* guards read or write of above */
64 };
65 
66 /*
67  * Invalid partition error code
68  */
69 enum prs_errcode {
70 	PERR_NONE = 0,
71 	PERR_INVCPUS,
72 	PERR_INVPARENT,
73 	PERR_NOTPART,
74 	PERR_NOTEXCL,
75 	PERR_NOCPUS,
76 	PERR_HOTPLUG,
77 	PERR_CPUSEMPTY,
78 };
79 
80 static const char * const perr_strings[] = {
81 	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus",
82 	[PERR_INVPARENT] = "Parent is an invalid partition root",
83 	[PERR_NOTPART]   = "Parent is not a partition root",
84 	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
85 	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
86 	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
87 	[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
88 };
89 
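/*
 * Illustrative note (not from the original source): when a partition
 * becomes invalid, reading cpuset.cpus.partition reports the new state
 * together with one of the strings above as the reason, e.g.:
 *
 *	# cat cpuset.cpus.partition
 *	root invalid (Cpu list in cpuset.cpus not exclusive)
 */
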
90 struct cpuset {
91 	struct cgroup_subsys_state css;
92 
93 	unsigned long flags;		/* "unsigned long" so bitops work */
94 
95 	/*
96 	 * On default hierarchy:
97 	 *
98 	 * The user-configured masks can only be changed by writing to
99 	 * cpuset.cpus and cpuset.mems, and won't be limited by the
100 	 * parent masks.
101 	 *
102 	 * The effective masks are the real masks that apply to the tasks
103 	 * in the cpuset. They may be changed if the configured masks are
104 	 * changed or a hotplug event happens.
105 	 *
106 	 * effective_mask == configured_mask & parent's effective_mask,
107 	 * and if it ends up empty, it will inherit the parent's mask.
108 	 *
109 	 *
110 	 * On legacy hierarchy:
111 	 *
112 	 * The user-configured masks are always the same as the effective masks.
113 	 */
114 
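	/*
	 * Worked example (illustrative, not from the original source): if
	 * the parent's effective_cpus is 0-3 and this cpuset's configured
	 * cpus_allowed is 2-5, effective_cpus becomes 2-3.  If cpus_allowed
	 * were 4-5 instead, the intersection would be empty and
	 * effective_cpus would fall back to the parent's 0-3.
	 */
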
115 	/* user-configured CPUs and Memory Nodes allowed to tasks */
116 	cpumask_var_t cpus_allowed;
117 	nodemask_t mems_allowed;
118 
119 	/* effective CPUs and Memory Nodes allowed to tasks */
120 	cpumask_var_t effective_cpus;
121 	nodemask_t effective_mems;
122 
123 	/*
124 	 * CPUs allocated to child sub-partitions (default hierarchy only)
125 	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
126 	 * - effective_cpus and subparts_cpus are mutually exclusive.
127 	 *
128 	 * effective_cpus contains only onlined CPUs, but subparts_cpus
129 	 * may have offlined ones.
130 	 */
131 	cpumask_var_t subparts_cpus;
132 
133 	/*
134 	 * These are the old Memory Nodes that tasks took on.
135 	 *
136 	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
137 	 * - A new cpuset's old_mems_allowed is initialized when some
138 	 *   task is moved into it.
139 	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
140 	 *   cpuset.mems_allowed and update the tasks' nodemasks; afterwards
141 	 *   old_mems_allowed is updated to mems_allowed.
142 	 */
143 	nodemask_t old_mems_allowed;
144 
145 	struct fmeter fmeter;		/* memory_pressure filter */
146 
147 	/*
148 	 * Tasks are being attached to this cpuset.  Used to prevent
149 	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
150 	 */
151 	int attach_in_progress;
152 
153 	/* partition number for rebuild_sched_domains() */
154 	int pn;
155 
156 	/* for custom sched domain */
157 	int relax_domain_level;
158 
159 	/* number of CPUs in subparts_cpus */
160 	int nr_subparts_cpus;
161 
162 	/* partition root state */
163 	int partition_root_state;
164 
165 	/*
166 	 * Default hierarchy only:
167 	 * use_parent_ecpus - set if using parent's effective_cpus
168 	 * child_ecpus_count - # of children with use_parent_ecpus set
169 	 */
170 	int use_parent_ecpus;
171 	int child_ecpus_count;
172 
173 	/*
174 	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
175 	 * know when to rebuild associated root domain bandwidth information.
176 	 */
177 	int nr_deadline_tasks;
178 	int nr_migrate_dl_tasks;
179 	u64 sum_migrate_dl_bw;
180 
181 	/* Invalid partition error code, not lock protected */
182 	enum prs_errcode prs_err;
183 
184 	/* Handle for cpuset.cpus.partition */
185 	struct cgroup_file partition_file;
186 };
187 
188 /*
189  * Partition root states:
190  *
191  *   0 - member (not a partition root)
192  *   1 - partition root
193  *   2 - partition root without load balancing (isolated)
194  *  -1 - invalid partition root
195  *  -2 - invalid isolated partition root
196  */
197 #define PRS_MEMBER		0
198 #define PRS_ROOT		1
199 #define PRS_ISOLATED		2
200 #define PRS_INVALID_ROOT	-1
201 #define PRS_INVALID_ISOLATED	-2
202 
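/*
 * Illustrative note (not from the original source): these states back
 * the cpuset.cpus.partition cgroup file.  Writing "member", "root" or
 * "isolated" selects PRS_MEMBER, PRS_ROOT or PRS_ISOLATED; the invalid
 * states are entered only internally, by negating a valid state (see
 * make_partition_invalid() below), e.g. PRS_ROOT (1) becomes
 * PRS_INVALID_ROOT (-1).
 */
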
203 static inline bool is_prs_invalid(int prs_state)
204 {
205 	return prs_state < 0;
206 }
207 
208 /*
209  * Temporary cpumasks for working with partitions that are passed among
210  * functions to avoid memory allocation in inner functions.
211  */
212 struct tmpmasks {
213 	cpumask_var_t addmask, delmask;	/* For partition root */
214 	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
215 };
216 
217 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
218 {
219 	return css ? container_of(css, struct cpuset, css) : NULL;
220 }
221 
222 /* Retrieve the cpuset for a task */
223 static inline struct cpuset *task_cs(struct task_struct *task)
224 {
225 	return css_cs(task_css(task, cpuset_cgrp_id));
226 }
227 
228 static inline struct cpuset *parent_cs(struct cpuset *cs)
229 {
230 	return css_cs(cs->css.parent);
231 }
232 
233 void inc_dl_tasks_cs(struct task_struct *p)
234 {
235 	struct cpuset *cs = task_cs(p);
236 
237 	cs->nr_deadline_tasks++;
238 }
239 
240 void dec_dl_tasks_cs(struct task_struct *p)
241 {
242 	struct cpuset *cs = task_cs(p);
243 
244 	cs->nr_deadline_tasks--;
245 }
246 
247 /* bits in struct cpuset flags field */
248 typedef enum {
249 	CS_ONLINE,
250 	CS_CPU_EXCLUSIVE,
251 	CS_MEM_EXCLUSIVE,
252 	CS_MEM_HARDWALL,
253 	CS_MEMORY_MIGRATE,
254 	CS_SCHED_LOAD_BALANCE,
255 	CS_SPREAD_PAGE,
256 	CS_SPREAD_SLAB,
257 } cpuset_flagbits_t;
258 
259 /* convenient tests for these bits */
260 static inline bool is_cpuset_online(struct cpuset *cs)
261 {
262 	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
263 }
264 
265 static inline int is_cpu_exclusive(const struct cpuset *cs)
266 {
267 	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
268 }
269 
270 static inline int is_mem_exclusive(const struct cpuset *cs)
271 {
272 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
273 }
274 
275 static inline int is_mem_hardwall(const struct cpuset *cs)
276 {
277 	return test_bit(CS_MEM_HARDWALL, &cs->flags);
278 }
279 
280 static inline int is_sched_load_balance(const struct cpuset *cs)
281 {
282 	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
283 }
284 
285 static inline int is_memory_migrate(const struct cpuset *cs)
286 {
287 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
288 }
289 
290 static inline int is_spread_page(const struct cpuset *cs)
291 {
292 	return test_bit(CS_SPREAD_PAGE, &cs->flags);
293 }
294 
295 static inline int is_spread_slab(const struct cpuset *cs)
296 {
297 	return test_bit(CS_SPREAD_SLAB, &cs->flags);
298 }
299 
300 static inline int is_partition_valid(const struct cpuset *cs)
301 {
302 	return cs->partition_root_state > 0;
303 }
304 
305 static inline int is_partition_invalid(const struct cpuset *cs)
306 {
307 	return cs->partition_root_state < 0;
308 }
309 
310 /*
311  * Callers should hold callback_lock to modify partition_root_state.
312  */
313 static inline void make_partition_invalid(struct cpuset *cs)
314 {
315 	if (is_partition_valid(cs))
316 		cs->partition_root_state = -cs->partition_root_state;
317 }
318 
319 /*
320  * Send a notification event whenever partition_root_state changes.
321  */
322 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
323 {
324 	if (old_prs == cs->partition_root_state)
325 		return;
326 	cgroup_file_notify(&cs->partition_file);
327 
328 	/* Reset prs_err if not invalid */
329 	if (is_partition_valid(cs))
330 		WRITE_ONCE(cs->prs_err, PERR_NONE);
331 }
332 
333 static struct cpuset top_cpuset = {
334 	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
335 		  (1 << CS_MEM_EXCLUSIVE)),
336 	.partition_root_state = PRS_ROOT,
337 };
338 
339 /**
340  * cpuset_for_each_child - traverse online children of a cpuset
341  * @child_cs: loop cursor pointing to the current child
342  * @pos_css: used for iteration
343  * @parent_cs: target cpuset to walk children of
344  *
345  * Walk @child_cs through the online children of @parent_cs.  Must be used
346  * with RCU read locked.
347  */
348 #define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
349 	css_for_each_child((pos_css), &(parent_cs)->css)		\
350 		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
351 
352 /**
353  * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
354  * @des_cs: loop cursor pointing to the current descendant
355  * @pos_css: used for iteration
356  * @root_cs: target cpuset whose descendants to walk
357  *
358  * Walk @des_cs through the online descendants of @root_cs.  Must be used
359  * with RCU read locked.  The caller may modify @pos_css by calling
360  * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
361  * the iteration and is the first node to be visited.
362  */
363 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
364 	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
365 		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
366 
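/*
 * Illustrative sketch (not part of the original file): a pre-order walk
 * that prunes subtrees, in the same shape as update_domain_attr_tree()
 * below.  The function name is hypothetical.
 */
static void example_walk_descendants(struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		if (cpumask_empty(cp->cpus_allowed)) {
			/* skip @cp's whole subtree */
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}
		/* ... visit @cp ... */
	}
	rcu_read_unlock();
}
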
367 /*
368  * There are two global locks guarding cpuset structures - cpuset_mutex and
369  * callback_lock. We also require taking task_lock() when dereferencing a
370  * task's cpuset pointer. See "The task_lock() exception", at the end of this
371  * comment.  The cpuset code uses only cpuset_mutex. Other kernel subsystems
372  * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
373  * structures. Note that cpuset_mutex needs to be a mutex as it is used in
374  * paths that rely on priority inheritance (e.g. scheduler - on RT) for
375  * correctness.
376  *
377  * A task must hold both locks to modify cpusets.  If a task holds
378  * cpuset_mutex, it blocks others, ensuring that it is the only task able to
379  * also acquire callback_lock and be able to modify cpusets.  It can perform
380  * various checks on the cpuset structure first, knowing nothing will change.
381  * It can also allocate memory while just holding cpuset_mutex.  While it is
382  * performing these checks, various callback routines can briefly acquire
383  * callback_lock to query cpusets.  Once it is ready to make the changes, it
384  * takes callback_lock, blocking everyone else.
385  *
386  * Calls to the kernel memory allocator cannot be made while holding
387  * callback_lock, as that would risk double tripping on callback_lock
388  * from one of the callbacks into the cpuset code from within
389  * __alloc_pages().
390  *
391  * If a task is only holding callback_lock, then it has read-only
392  * access to cpusets.
393  *
394  * Since the task_struct fields mems_allowed and mempolicy may be changed
395  * by another task, we use alloc_lock in the task_struct to protect
396  * them.
397  *
398  * The cpuset_common_file_read() handlers only hold callback_lock across
399  * small pieces of code, such as when reading out possibly multi-word
400  * cpumasks and nodemasks.
401  *
402  * Accessing a task's cpuset should be done in accordance with the
403  * guidelines for accessing subsystem state in kernel/cgroup.c
404  */
405 
406 static DEFINE_MUTEX(cpuset_mutex);
407 
408 void cpuset_lock(void)
409 {
410 	mutex_lock(&cpuset_mutex);
411 }
412 
413 void cpuset_unlock(void)
414 {
415 	mutex_unlock(&cpuset_mutex);
416 }
417 
418 static DEFINE_SPINLOCK(callback_lock);
419 
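/*
 * Illustrative sketch (not part of the original file) of the two-lock
 * scheme described above: writers serialize on cpuset_mutex and may
 * sleep (e.g. to allocate memory), then take callback_lock only around
 * the actual update so that non-sleeping readers are blocked as briefly
 * as possible.  The function name is hypothetical.
 */
static inline void example_modify_cpuset(struct cpuset *cs,
					 const struct cpumask *newmask)
{
	mutex_lock(&cpuset_mutex);		/* be the only writer */
	/* ... validation and allocation (may sleep) ... */
	spin_lock_irq(&callback_lock);		/* exclude readers */
	cpumask_copy(cs->cpus_allowed, newmask);
	spin_unlock_irq(&callback_lock);
	mutex_unlock(&cpuset_mutex);
}
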
420 static struct workqueue_struct *cpuset_migrate_mm_wq;
421 
422 /*
423  * CPU / memory hotplug is handled asynchronously.
424  */
425 static void cpuset_hotplug_workfn(struct work_struct *work);
426 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
427 
428 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
429 
430 static inline void check_insane_mems_config(nodemask_t *nodes)
431 {
432 	if (!cpusets_insane_config() &&
433 		movable_only_nodes(nodes)) {
434 		static_branch_enable(&cpusets_insane_config_key);
435 		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
436 			"Cpuset allocations might fail even with a lot of memory available.\n",
437 			nodemask_pr_args(nodes));
438 	}
439 }
440 
441 /*
442  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
443  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
444  * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
445  * With v2 behavior, "cpus" and "mems" are always what the users have
446  * requested and won't be changed by hotplug events. Only the effective
447  * cpus or mems will be affected.
448  */
449 static inline bool is_in_v2_mode(void)
450 {
451 	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
452 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
453 }
454 
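/*
 * Illustrative example (not from the original source): v2 behavior on
 * the v1 hierarchy is selected at mount time, e.g.:
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode cpuset /sys/fs/cgroup/cpuset
 */
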
455 /**
456  * partition_is_populated - check if partition has tasks
457  * @cs: partition root to be checked
458  * @excluded_child: a child cpuset to be excluded in task checking
459  * Return: true if there are tasks, false otherwise
460  *
461  * It is assumed that @cs is a valid partition root. @excluded_child should
462  * be non-NULL when this cpuset is going to become a partition itself.
463  */
464 static inline bool partition_is_populated(struct cpuset *cs,
465 					  struct cpuset *excluded_child)
466 {
467 	struct cgroup_subsys_state *css;
468 	struct cpuset *child;
469 
470 	if (cs->css.cgroup->nr_populated_csets)
471 		return true;
472 	if (!excluded_child && !cs->nr_subparts_cpus)
473 		return cgroup_is_populated(cs->css.cgroup);
474 
475 	rcu_read_lock();
476 	cpuset_for_each_child(child, css, cs) {
477 		if (child == excluded_child)
478 			continue;
479 		if (is_partition_valid(child))
480 			continue;
481 		if (cgroup_is_populated(child->css.cgroup)) {
482 			rcu_read_unlock();
483 			return true;
484 		}
485 	}
486 	rcu_read_unlock();
487 	return false;
488 }
489 
490 /*
491  * Return in pmask the portion of a task's cpuset's cpus_allowed that
492  * are online and are capable of running the task.  If none are found,
493  * walk up the cpuset hierarchy until we find one that does have some
494  * appropriate cpus.
495  *
496  * One way or another, we guarantee to return some non-empty subset
497  * of cpu_online_mask.
498  *
499  * Call with callback_lock or cpuset_mutex held.
500  */
501 static void guarantee_online_cpus(struct task_struct *tsk,
502 				  struct cpumask *pmask)
503 {
504 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
505 	struct cpuset *cs;
506 
507 	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
508 		cpumask_copy(pmask, cpu_online_mask);
509 
510 	rcu_read_lock();
511 	cs = task_cs(tsk);
512 
513 	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
514 		cs = parent_cs(cs);
515 		if (unlikely(!cs)) {
516 			/*
517 			 * The top cpuset doesn't have any online cpu as a
518 			 * consequence of a race between cpuset_hotplug_work
519 			 * and cpu hotplug notifier.  But we know the top
520 			 * cpuset's effective_cpus is on its way to be
521 			 * identical to cpu_online_mask.
522 			 */
523 			goto out_unlock;
524 		}
525 	}
526 	cpumask_and(pmask, pmask, cs->effective_cpus);
527 
528 out_unlock:
529 	rcu_read_unlock();
530 }
531 
532 /*
533  * Return in *pmask the portion of a cpuset's mems_allowed that
534  * are online, with memory.  If none are online with memory, walk
535  * up the cpuset hierarchy until we find one that does have some
536  * online mems.  The top cpuset always has some mems online.
537  *
538  * One way or another, we guarantee to return some non-empty subset
539  * of node_states[N_MEMORY].
540  *
541  * Call with callback_lock or cpuset_mutex held.
542  */
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
544 {
545 	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
546 		cs = parent_cs(cs);
547 	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
548 }
549 
550 /*
551  * update task's spread flags if cpuset's page/slab spread flag is set
552  *
553  * Call with callback_lock or cpuset_mutex held. The check can be skipped
554  * if on default hierarchy.
555  */
556 static void cpuset_update_task_spread_flags(struct cpuset *cs,
557 					struct task_struct *tsk)
558 {
559 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
560 		return;
561 
562 	if (is_spread_page(cs))
563 		task_set_spread_page(tsk);
564 	else
565 		task_clear_spread_page(tsk);
566 
567 	if (is_spread_slab(cs))
568 		task_set_spread_slab(tsk);
569 	else
570 		task_clear_spread_slab(tsk);
571 }
572 
573 /*
574  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
575  *
576  * One cpuset is a subset of another if all its allowed CPUs and
577  * Memory Nodes are a subset of the other, and its exclusive flags
578  * are only set if the other's are set.  Call holding cpuset_mutex.
579  */
580 
581 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
582 {
583 	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
584 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
585 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
586 		is_mem_exclusive(p) <= is_mem_exclusive(q);
587 }
588 
589 /**
590  * alloc_cpumasks - allocate three cpumasks for cpuset
591  * @cs:  the cpuset that has cpumasks to be allocated.
592  * @tmp: the tmpmasks structure pointer
593  * Return: 0 if successful, -ENOMEM otherwise.
594  *
595  * Only one of the two input arguments should be non-NULL.
596  */
597 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
598 {
599 	cpumask_var_t *pmask1, *pmask2, *pmask3;
600 
601 	if (cs) {
602 		pmask1 = &cs->cpus_allowed;
603 		pmask2 = &cs->effective_cpus;
604 		pmask3 = &cs->subparts_cpus;
605 	} else {
606 		pmask1 = &tmp->new_cpus;
607 		pmask2 = &tmp->addmask;
608 		pmask3 = &tmp->delmask;
609 	}
610 
611 	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
612 		return -ENOMEM;
613 
614 	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
615 		goto free_one;
616 
617 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
618 		goto free_two;
619 
620 	return 0;
621 
622 free_two:
623 	free_cpumask_var(*pmask2);
624 free_one:
625 	free_cpumask_var(*pmask1);
626 	return -ENOMEM;
627 }
628 
629 /**
630  * free_cpumasks - free cpumasks in a tmpmasks structure
631  * @cs:  the cpuset that has cpumasks to be freed.
632  * @tmp: the tmpmasks structure pointer
633  */
634 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
635 {
636 	if (cs) {
637 		free_cpumask_var(cs->cpus_allowed);
638 		free_cpumask_var(cs->effective_cpus);
639 		free_cpumask_var(cs->subparts_cpus);
640 	}
641 	if (tmp) {
642 		free_cpumask_var(tmp->new_cpus);
643 		free_cpumask_var(tmp->addmask);
644 		free_cpumask_var(tmp->delmask);
645 	}
646 }
647 
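/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of alloc_cpumasks()/free_cpumasks() when only temporary masks are
 * needed.  The function name is hypothetical.
 */
static int example_with_tmpmasks(void)
{
	struct tmpmasks tmp;

	if (alloc_cpumasks(NULL, &tmp))		/* cs == NULL: fill in @tmp */
		return -ENOMEM;
	/* ... use tmp.new_cpus, tmp.addmask and tmp.delmask ... */
	free_cpumasks(NULL, &tmp);
	return 0;
}
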
648 /**
649  * alloc_trial_cpuset - allocate a trial cpuset
650  * @cs: the cpuset that the trial cpuset duplicates
651  */
652 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
653 {
654 	struct cpuset *trial;
655 
656 	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
657 	if (!trial)
658 		return NULL;
659 
660 	if (alloc_cpumasks(trial, NULL)) {
661 		kfree(trial);
662 		return NULL;
663 	}
664 
665 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
666 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
667 	return trial;
668 }
669 
670 /**
671  * free_cpuset - free the cpuset
672  * @cs: the cpuset to be freed
673  */
674 static inline void free_cpuset(struct cpuset *cs)
675 {
676 	free_cpumasks(cs, NULL);
677 	kfree(cs);
678 }
679 
680 /*
681  * validate_change_legacy() - Validate conditions specific to legacy (v1)
682  *                            behavior.
683  */
684 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
685 {
686 	struct cgroup_subsys_state *css;
687 	struct cpuset *c, *par;
688 	int ret;
689 
690 	WARN_ON_ONCE(!rcu_read_lock_held());
691 
692 	/* Each of our child cpusets must be a subset of us */
693 	ret = -EBUSY;
694 	cpuset_for_each_child(c, css, cur)
695 		if (!is_cpuset_subset(c, trial))
696 			goto out;
697 
698 	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
699 	ret = -EACCES;
700 	par = parent_cs(cur);
701 	if (par && !is_cpuset_subset(trial, par))
702 		goto out;
703 
704 	ret = 0;
705 out:
706 	return ret;
707 }
708 
709 /*
710  * validate_change() - Used to validate that any proposed cpuset change
711  *		       follows the structural rules for cpusets.
712  *
713  * If we replaced the flag and mask values of the current cpuset
714  * (cur) with those values in the trial cpuset (trial), would
715  * our various subset and exclusive rules still be valid?  Presumes
716  * cpuset_mutex held.
717  *
718  * 'cur' is the address of an actual, in-use cpuset.  Operations
719  * such as list traversal that depend on the actual address of the
720  * cpuset in the list must use cur below, not trial.
721  *
722  * 'trial' is the address of bulk structure copy of cur, with
723  * perhaps one or more of the fields cpus_allowed, mems_allowed,
724  * or flags changed to new, trial values.
725  *
726  * Return 0 if valid, -errno if not.
727  */
728 
729 static int validate_change(struct cpuset *cur, struct cpuset *trial)
730 {
731 	struct cgroup_subsys_state *css;
732 	struct cpuset *c, *par;
733 	int ret = 0;
734 
735 	rcu_read_lock();
736 
737 	if (!is_in_v2_mode())
738 		ret = validate_change_legacy(cur, trial);
739 	if (ret)
740 		goto out;
741 
742 	/* Remaining checks don't apply to root cpuset */
743 	if (cur == &top_cpuset)
744 		goto out;
745 
746 	par = parent_cs(cur);
747 
748 	/*
749 	 * Cpusets with tasks - existing or newly being attached - can't
750 	 * be changed to have empty cpus_allowed or mems_allowed.
751 	 */
752 	ret = -ENOSPC;
753 	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
754 		if (!cpumask_empty(cur->cpus_allowed) &&
755 		    cpumask_empty(trial->cpus_allowed))
756 			goto out;
757 		if (!nodes_empty(cur->mems_allowed) &&
758 		    nodes_empty(trial->mems_allowed))
759 			goto out;
760 	}
761 
762 	/*
763 	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
764 	 * tasks.
765 	 */
766 	ret = -EBUSY;
767 	if (is_cpu_exclusive(cur) &&
768 	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
769 				       trial->cpus_allowed))
770 		goto out;
771 
772 	/*
773 	 * If either I or some sibling (!= me) is exclusive, we can't
774 	 * overlap
775 	 */
776 	ret = -EINVAL;
777 	cpuset_for_each_child(c, css, par) {
778 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
779 		    c != cur &&
780 		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
781 			goto out;
782 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
783 		    c != cur &&
784 		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
785 			goto out;
786 	}
787 
788 	ret = 0;
789 out:
790 	rcu_read_unlock();
791 	return ret;
792 }
793 
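/*
 * Illustrative sketch (not part of the original file) of the
 * trial-cpuset pattern built from the helpers above: duplicate the
 * cpuset, apply the proposed change to the copy, validate it, and only
 * then let the caller commit.  The function name is hypothetical.
 */
static int example_check_new_cpus(struct cpuset *cs,
				  const struct cpumask *newmask)
{
	struct cpuset *trial = alloc_trial_cpuset(cs);
	int err;

	if (!trial)
		return -ENOMEM;
	cpumask_copy(trial->cpus_allowed, newmask);	/* proposed change */
	err = validate_change(cs, trial);
	/* on success, the caller would copy trial's masks back into @cs */
	free_cpuset(trial);
	return err;
}
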
794 #ifdef CONFIG_SMP
795 /*
796  * Helper routine for generate_sched_domains().
797  * Do cpusets a, b have overlapping effective_cpus masks?
798  */
799 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
800 {
801 	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
802 }
803 
804 static void
805 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
806 {
807 	if (dattr->relax_domain_level < c->relax_domain_level)
808 		dattr->relax_domain_level = c->relax_domain_level;
809 	return;
810 }
811 
812 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
813 				    struct cpuset *root_cs)
814 {
815 	struct cpuset *cp;
816 	struct cgroup_subsys_state *pos_css;
817 
818 	rcu_read_lock();
819 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
820 		/* skip the whole subtree if @cp doesn't have any CPU */
821 		if (cpumask_empty(cp->cpus_allowed)) {
822 			pos_css = css_rightmost_descendant(pos_css);
823 			continue;
824 		}
825 
826 		if (is_sched_load_balance(cp))
827 			update_domain_attr(dattr, cp);
828 	}
829 	rcu_read_unlock();
830 }
831 
832 /* Must be called with cpuset_mutex held.  */
833 static inline int nr_cpusets(void)
834 {
835 	/* jump label reference count + the top-level cpuset */
836 	return static_key_count(&cpusets_enabled_key.key) + 1;
837 }
838 
839 /*
840  * generate_sched_domains()
841  *
842  * This function builds a partial partition of the system's CPUs.
843  * A 'partial partition' is a set of non-overlapping subsets whose
844  * union is a subset of that set.
845  * The output of this function needs to be passed to kernel/sched/core.c
846  * partition_sched_domains() routine, which will rebuild the scheduler's
847  * load balancing domains (sched domains) as specified by that partial
848  * partition.
849  *
850  * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
851  * for a background explanation of this.
852  *
853  * Does not return errors, on the theory that the callers of this
854  * routine would rather not worry about failures to rebuild sched
855  * domains when operating in the severe memory shortage situations
856  * that could cause allocation failures below.
857  *
858  * Must be called with cpuset_mutex held.
859  *
860  * The three key local variables below are:
861  *    cp - cpuset pointer, used (together with pos_css) to perform a
862  *	   top-down scan of all cpusets. For our purposes, rebuilding
863  *	   the scheduler's sched domains, we can ignore !is_sched_load_
864  *	   balance cpusets.
865  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
866  *	   that need to be load balanced, for convenient iterative
867  *	   access by the subsequent code that finds the best partition,
868  *	   i.e the set of domains (subsets) of CPUs such that the
869  *	   i.e. the set of domains (subsets) of CPUs such that the
870  *	   is a subset of one of these domains, while there are as
871  *	   many such domains as possible, each as small as possible.
872  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
873  *	   the kernel/sched/core.c routine partition_sched_domains() in a
874  *	   convenient format, that can be easily compared to the prior
875  *	   value to determine what partition elements (sched domains)
876  *	   were changed (added or removed).
877  *
878  * Finding the best partition (set of domains):
879  *	The triple nested loops below over i, j, k scan over the
880  *	load balanced cpusets (using the array of cpuset pointers in
881  *	csa[]) looking for pairs of cpusets that have overlapping
882  *	cpus_allowed, but which don't have the same 'pn' partition
883  *	number, and gives them the same partition number.  It keeps
884  *	looping on the 'restart' label until it can no longer find
885  *	any such pairs.
886  *
887  *	The union of the cpus_allowed masks from the set of
888  *	all cpusets having the same 'pn' value then form the one
889  *	element of the partition (one sched domain) to be passed to
890  *	partition_sched_domains().
891  */
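/*
 * Worked example (illustrative, not from the original source): with
 * three load-balanced cpusets whose effective CPUs are A = 0-1,
 * B = 1-2 and C = 4-5, A and B overlap and are merged into one
 * partition while C stays separate, so the result is ndoms == 2 with
 * doms[] = { 0-2, 4-5 }.
 */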
892 static int generate_sched_domains(cpumask_var_t **domains,
893 			struct sched_domain_attr **attributes)
894 {
895 	struct cpuset *cp;	/* top-down scan of cpusets */
896 	struct cpuset **csa;	/* array of all cpuset ptrs */
897 	int csn;		/* how many cpuset ptrs in csa so far */
898 	int i, j, k;		/* indices for partition finding loops */
899 	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
900 	struct sched_domain_attr *dattr;  /* attributes for custom domains */
901 	int ndoms = 0;		/* number of sched domains in result */
902 	int nslot;		/* next empty doms[] struct cpumask slot */
903 	struct cgroup_subsys_state *pos_css;
904 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
905 
906 	doms = NULL;
907 	dattr = NULL;
908 	csa = NULL;
909 
910 	/* Special case for the 99% of systems with one, full, sched domain */
911 	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
912 		ndoms = 1;
913 		doms = alloc_sched_domains(ndoms);
914 		if (!doms)
915 			goto done;
916 
917 		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
918 		if (dattr) {
919 			*dattr = SD_ATTR_INIT;
920 			update_domain_attr_tree(dattr, &top_cpuset);
921 		}
922 		cpumask_and(doms[0], top_cpuset.effective_cpus,
923 			    housekeeping_cpumask(HK_TYPE_DOMAIN));
924 
925 		goto done;
926 	}
927 
928 	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
929 	if (!csa)
930 		goto done;
931 	csn = 0;
932 
933 	rcu_read_lock();
934 	if (root_load_balance)
935 		csa[csn++] = &top_cpuset;
936 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
937 		if (cp == &top_cpuset)
938 			continue;
939 		/*
940 		 * Continue traversing beyond @cp iff @cp has some CPUs and
941 		 * isn't load balancing.  The former is obvious.  The
942 		 * latter: All child cpusets contain a subset of the
943 		 * parent's cpus, so just skip them, and then we call
944 		 * update_domain_attr_tree() to calc relax_domain_level of
945 		 * the corresponding sched domain.
946 		 *
947 		 * If root is load-balancing, we can skip @cp if it
948 		 * is a subset of the root's effective_cpus.
949 		 */
950 		if (!cpumask_empty(cp->cpus_allowed) &&
951 		    !(is_sched_load_balance(cp) &&
952 		      cpumask_intersects(cp->cpus_allowed,
953 					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
954 			continue;
955 
956 		if (root_load_balance &&
957 		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
958 			continue;
959 
960 		if (is_sched_load_balance(cp) &&
961 		    !cpumask_empty(cp->effective_cpus))
962 			csa[csn++] = cp;
963 
964 		/* skip @cp's subtree if not a partition root */
965 		if (!is_partition_valid(cp))
966 			pos_css = css_rightmost_descendant(pos_css);
967 	}
968 	rcu_read_unlock();
969 
970 	for (i = 0; i < csn; i++)
971 		csa[i]->pn = i;
972 	ndoms = csn;
973 
974 restart:
975 	/* Find the best partition (set of sched domains) */
976 	for (i = 0; i < csn; i++) {
977 		struct cpuset *a = csa[i];
978 		int apn = a->pn;
979 
980 		for (j = 0; j < csn; j++) {
981 			struct cpuset *b = csa[j];
982 			int bpn = b->pn;
983 
984 			if (apn != bpn && cpusets_overlap(a, b)) {
985 				for (k = 0; k < csn; k++) {
986 					struct cpuset *c = csa[k];
987 
988 					if (c->pn == bpn)
989 						c->pn = apn;
990 				}
991 				ndoms--;	/* one less element */
992 				goto restart;
993 			}
994 		}
995 	}
996 
997 	/*
998 	 * Now we know how many domains to create.
999 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1000 	 */
1001 	doms = alloc_sched_domains(ndoms);
1002 	if (!doms)
1003 		goto done;
1004 
1005 	/*
1006 	 * The rest of the code, including the scheduler, can deal with
1007 	 * dattr==NULL case. No need to abort if alloc fails.
1008 	 */
1009 	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1010 			      GFP_KERNEL);
1011 
1012 	for (nslot = 0, i = 0; i < csn; i++) {
1013 		struct cpuset *a = csa[i];
1014 		struct cpumask *dp;
1015 		int apn = a->pn;
1016 
1017 		if (apn < 0) {
1018 			/* Skip completed partitions */
1019 			continue;
1020 		}
1021 
1022 		dp = doms[nslot];
1023 
1024 		if (nslot == ndoms) {
1025 			static int warnings = 10;
1026 			if (warnings) {
1027 				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1028 					nslot, ndoms, csn, i, apn);
1029 				warnings--;
1030 			}
1031 			continue;
1032 		}
1033 
1034 		cpumask_clear(dp);
1035 		if (dattr)
1036 			*(dattr + nslot) = SD_ATTR_INIT;
1037 		for (j = i; j < csn; j++) {
1038 			struct cpuset *b = csa[j];
1039 
1040 			if (apn == b->pn) {
1041 				cpumask_or(dp, dp, b->effective_cpus);
1042 				cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1043 				if (dattr)
1044 					update_domain_attr_tree(dattr + nslot, b);
1045 
1046 				/* Done with this partition */
1047 				b->pn = -1;
1048 			}
1049 		}
1050 		nslot++;
1051 	}
1052 	BUG_ON(nslot != ndoms);
1053 
1054 done:
1055 	kfree(csa);
1056 
1057 	/*
1058 	 * Fallback to the default domain if kmalloc() failed.
1059 	 * See comments in partition_sched_domains().
1060 	 */
1061 	if (doms == NULL)
1062 		ndoms = 1;
1063 
1064 	*domains    = doms;
1065 	*attributes = dattr;
1066 	return ndoms;
1067 }
1068 
1069 static void dl_update_tasks_root_domain(struct cpuset *cs)
1070 {
1071 	struct css_task_iter it;
1072 	struct task_struct *task;
1073 
1074 	if (cs->nr_deadline_tasks == 0)
1075 		return;
1076 
1077 	css_task_iter_start(&cs->css, 0, &it);
1078 
1079 	while ((task = css_task_iter_next(&it)))
1080 		dl_add_task_root_domain(task);
1081 
1082 	css_task_iter_end(&it);
1083 }
1084 
1085 static void dl_rebuild_rd_accounting(void)
1086 {
1087 	struct cpuset *cs = NULL;
1088 	struct cgroup_subsys_state *pos_css;
1089 
1090 	lockdep_assert_held(&cpuset_mutex);
1091 	lockdep_assert_cpus_held();
1092 	lockdep_assert_held(&sched_domains_mutex);
1093 
1094 	rcu_read_lock();
1095 
1096 	/*
1097 	 * Clear default root domain DL accounting, it will be computed again
1098 	 * if a task belongs to it.
1099 	 */
1100 	dl_clear_root_domain(&def_root_domain);
1101 
1102 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1103 
1104 		if (cpumask_empty(cs->effective_cpus)) {
1105 			pos_css = css_rightmost_descendant(pos_css);
1106 			continue;
1107 		}
1108 
1109 		css_get(&cs->css);
1110 
1111 		rcu_read_unlock();
1112 
1113 		dl_update_tasks_root_domain(cs);
1114 
1115 		rcu_read_lock();
1116 		css_put(&cs->css);
1117 	}
1118 	rcu_read_unlock();
1119 }
1120 
1121 static void
1122 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1123 				    struct sched_domain_attr *dattr_new)
1124 {
1125 	mutex_lock(&sched_domains_mutex);
1126 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1127 	dl_rebuild_rd_accounting();
1128 	mutex_unlock(&sched_domains_mutex);
1129 }
1130 
1131 /*
1132  * Rebuild scheduler domains.
1133  *
1134  * If the flag 'sched_load_balance' of any cpuset with non-empty
1135  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1136  * which has that flag enabled, or if any cpuset with a non-empty
1137  * 'cpus' is removed, then call this routine to rebuild the
1138  * scheduler's dynamic sched domains.
1139  *
1140  * Call with cpuset_mutex held and cpus_read_lock() already taken.
1141  */
1142 static void rebuild_sched_domains_locked(void)
1143 {
1144 	struct cgroup_subsys_state *pos_css;
1145 	struct sched_domain_attr *attr;
1146 	cpumask_var_t *doms;
1147 	struct cpuset *cs;
1148 	int ndoms;
1149 
1150 	lockdep_assert_cpus_held();
1151 	lockdep_assert_held(&cpuset_mutex);
1152 
1153 	/*
1154 	 * If we have raced with CPU hotplug, return early to avoid
1155 	 * passing doms with offlined cpu to partition_sched_domains().
1156 	 * Anyway, cpuset_hotplug_workfn() will rebuild the sched domains.
1157 	 *
1158 	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1159 	 * should be the same as the active CPUs, so checking only top_cpuset
1160 	 * is enough to detect racing CPU offlines.
1161 	 */
1162 	if (!top_cpuset.nr_subparts_cpus &&
1163 	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1164 		return;
1165 
1166 	/*
1167 	 * With subpartition CPUs, however, the effective CPUs of a partition
1168 	 * root should be only a subset of the active CPUs.  Since a CPU in any
1169 	 * partition root could be offlined, all must be checked.
1170 	 */
1171 	if (top_cpuset.nr_subparts_cpus) {
1172 		rcu_read_lock();
1173 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1174 			if (!is_partition_valid(cs)) {
1175 				pos_css = css_rightmost_descendant(pos_css);
1176 				continue;
1177 			}
1178 			if (!cpumask_subset(cs->effective_cpus,
1179 					    cpu_active_mask)) {
1180 				rcu_read_unlock();
1181 				return;
1182 			}
1183 		}
1184 		rcu_read_unlock();
1185 	}
1186 
1187 	/* Generate domain masks and attrs */
1188 	ndoms = generate_sched_domains(&doms, &attr);
1189 
1190 	/* Have scheduler rebuild the domains */
1191 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
1192 }
1193 #else /* !CONFIG_SMP */
1194 static void rebuild_sched_domains_locked(void)
1195 {
1196 }
1197 #endif /* CONFIG_SMP */
1198 
1199 void rebuild_sched_domains(void)
1200 {
1201 	cpus_read_lock();
1202 	mutex_lock(&cpuset_mutex);
1203 	rebuild_sched_domains_locked();
1204 	mutex_unlock(&cpuset_mutex);
1205 	cpus_read_unlock();
1206 }
1207 
1208 /**
1209  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1210  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1211  * @new_cpus: the temp variable for the new effective_cpus mask
1212  *
1213  * Iterate through each task of @cs, updating its cpus_allowed to the
1214  * cpuset's effective mask.  As this function is called with cpuset_mutex held,
1215  * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1216  * is used instead of effective_cpus to make sure all offline CPUs are also
1217  * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1218  */
1219 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1220 {
1221 	struct css_task_iter it;
1222 	struct task_struct *task;
1223 	bool top_cs = cs == &top_cpuset;
1224 
1225 	css_task_iter_start(&cs->css, 0, &it);
1226 	while ((task = css_task_iter_next(&it))) {
1227 		const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1228 
1229 		if (top_cs) {
1230 			/*
1231 			 * Percpu kthreads in top_cpuset are ignored
1232 			 */
1233 			if (kthread_is_per_cpu(task))
1234 				continue;
1235 			cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
1236 		} else {
1237 			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1238 		}
1239 		set_cpus_allowed_ptr(task, new_cpus);
1240 	}
1241 	css_task_iter_end(&it);
1242 }
1243 
1244 /**
1245  * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1246  * @new_cpus: the temp variable for the new effective_cpus mask
1247  * @cs: the cpuset that needs its new effective_cpus mask recomputed
1248  * @parent: the parent cpuset
1249  *
1250  * If the parent has subpartition CPUs, include them in the list of
1251  * allowable CPUs in computing the new effective_cpus mask. Since offlined
1252  * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1253  * to mask those out.
1254  */
1255 static void compute_effective_cpumask(struct cpumask *new_cpus,
1256 				      struct cpuset *cs, struct cpuset *parent)
1257 {
1258 	if (parent->nr_subparts_cpus && is_partition_valid(cs)) {
1259 		cpumask_or(new_cpus, parent->effective_cpus,
1260 			   parent->subparts_cpus);
1261 		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1262 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1263 	} else {
1264 		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1265 	}
1266 }
1267 
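/*
 * Worked example (illustrative, not from the original source): if the
 * parent has effective_cpus = 0-1 and subparts_cpus = 2-3, a valid
 * partition child with cpus_allowed = 1-2 gets
 * new_cpus = ((0-1 | 2-3) & 1-2) & cpu_active_mask, i.e. 1-2 when both
 * CPUs are active.
 */
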
1268 /*
1269  * Commands for update_parent_subparts_cpumask
1270  */
1271 enum subparts_cmd {
1272 	partcmd_enable,		/* Enable partition root	 */
1273 	partcmd_disable,	/* Disable partition root	 */
1274 	partcmd_update,		/* Update parent's subparts_cpus */
1275 	partcmd_invalidate,	/* Make partition invalid	 */
1276 };
1277 
1278 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1279 		       int turning_on);
1280 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1281 				    struct tmpmasks *tmp);
1282 
1283 /*
1284  * Update partition exclusive flag
1285  *
1286  * Return: 0 if successful, an error code otherwise
1287  */
1288 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1289 {
1290 	bool exclusive = (new_prs > 0);
1291 
1292 	if (exclusive && !is_cpu_exclusive(cs)) {
1293 		if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1294 			return PERR_NOTEXCL;
1295 	} else if (!exclusive && is_cpu_exclusive(cs)) {
1296 		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1297 		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1298 	}
1299 	return 0;
1300 }
1301 
1302 /*
1303  * Update partition load balance flag and/or rebuild sched domain
1304  *
1305  * Changing load balance flag will automatically call
1306  * rebuild_sched_domains_locked().
1307  * This function is for cgroup v2 only.
1308  */
1309 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1310 {
1311 	int new_prs = cs->partition_root_state;
1312 	bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1313 	bool new_lb;
1314 
1315 	/*
1316 	 * If cs is not a valid partition root, the load balance state
1317 	 * will follow its parent's.
1318 	 */
1319 	if (new_prs > 0) {
1320 		new_lb = (new_prs != PRS_ISOLATED);
1321 	} else {
1322 		new_lb = is_sched_load_balance(parent_cs(cs));
1323 	}
1324 	if (new_lb != !!is_sched_load_balance(cs)) {
1325 		rebuild_domains = true;
1326 		if (new_lb)
1327 			set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1328 		else
1329 			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1330 	}
1331 
1332 	if (rebuild_domains)
1333 		rebuild_sched_domains_locked();
1334 }
1335 
1336 /**
1337  * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1338  * @cs:      The cpuset that requests change in partition root state
1339  * @cmd:     Partition root state change command
1340  * @newmask: Optional new cpumask for partcmd_update
1341  * @tmp:     Temporary addmask and delmask
1342  * Return:   0 or a partition root state error code
1343  *
1344  * For partcmd_enable, the cpuset is being transformed from a non-partition
1345  * root to a partition root. The cpus_allowed mask of the given cpuset will
1346  * be put into parent's subparts_cpus and taken away from parent's
1347  * effective_cpus. The function will return 0 if all the CPUs listed in
1348  * cpus_allowed can be granted or an error code will be returned.
1349  *
1350  * For partcmd_disable, the cpuset is being transformed from a partition
1351  * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1352  * parent's subparts_cpus will be taken away from that cpumask and put back
1353  * into parent's effective_cpus. 0 will always be returned.
1354  *
1355  * For partcmd_update, if the optional newmask is specified, the cpu list is
1356  * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1357  * assumed to remain the same. The cpuset should either be a valid or invalid
1358  * partition root. The partition root state may change from valid to invalid
1359  * or vice versa. An error code will only be returned if transitioning from
1360  * invalid to valid violates the exclusivity rule.
1361  *
1362  * For partcmd_invalidate, the current partition will be made invalid.
1363  *
1364  * The partcmd_enable and partcmd_disable commands are used by
1365  * update_prstate(). An error code may be returned and the caller will check
1366  * for error.
1367  *
1368  * The partcmd_update command is used by update_cpumasks_hier() with newmask
1369  * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1370  * by update_cpumask() with NULL newmask. In both cases, the callers won't
1371  * check for error and so partition_root_state and prs_err will be updated
1372  * directly.
1373  */
1374 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1375 					  struct cpumask *newmask,
1376 					  struct tmpmasks *tmp)
1377 {
1378 	struct cpuset *parent = parent_cs(cs);
1379 	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
1380 	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
1381 	int old_prs, new_prs;
1382 	int part_error = PERR_NONE;	/* Partition error? */
1383 
1384 	lockdep_assert_held(&cpuset_mutex);
1385 
1386 	/*
1387 	 * The parent must be a partition root.
1388 	 * The new cpumask, if present, or the current cpus_allowed must
1389 	 * not be empty.
1390 	 */
1391 	if (!is_partition_valid(parent)) {
1392 		return is_partition_invalid(parent)
1393 		       ? PERR_INVPARENT : PERR_NOTPART;
1394 	}
1395 	if (!newmask && cpumask_empty(cs->cpus_allowed))
1396 		return PERR_CPUSEMPTY;
1397 
1398 	/*
1399 	 * new_prs will only be changed for the partcmd_update and
1400 	 * partcmd_invalidate commands.
1401 	 */
1402 	adding = deleting = false;
1403 	old_prs = new_prs = cs->partition_root_state;
1404 	if (cmd == partcmd_enable) {
1405 		/*
1406 		 * Enabling partition root is not allowed if cpus_allowed
1407 		 * doesn't overlap parent's cpus_allowed.
1408 		 */
1409 		if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1410 			return PERR_INVCPUS;
1411 
1412 		/*
1413 		 * A parent can be left with no CPU as long as there is no
1414 		 * task directly associated with the parent partition.
1415 		 */
1416 		if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1417 		    partition_is_populated(parent, cs))
1418 			return PERR_NOCPUS;
1419 
1420 		cpumask_copy(tmp->addmask, cs->cpus_allowed);
1421 		adding = true;
1422 	} else if (cmd == partcmd_disable) {
1423 		/*
1424 		 * Need to remove cpus from parent's subparts_cpus for valid
1425 		 * partition root.
1426 		 */
1427 		deleting = !is_prs_invalid(old_prs) &&
1428 			   cpumask_and(tmp->delmask, cs->cpus_allowed,
1429 				       parent->subparts_cpus);
1430 	} else if (cmd == partcmd_invalidate) {
1431 		if (is_prs_invalid(old_prs))
1432 			return 0;
1433 
1434 		/*
1435 		 * Make the current partition invalid. It is assumed that
1436 		 * invalidation is caused by violating cpu exclusivity rule.
1437 		 */
1438 		deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1439 				       parent->subparts_cpus);
1440 		if (old_prs > 0) {
1441 			new_prs = -old_prs;
1442 			part_error = PERR_NOTEXCL;
1443 		}
1444 	} else if (newmask) {
1445 		/*
1446 		 * partcmd_update with newmask:
1447 		 *
1448 		 * Compute add/delete mask to/from subparts_cpus
1449 		 *
1450 		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1451 		 * addmask = newmask & parent->cpus_allowed
1452 		 *		     & ~parent->subparts_cpus
1453 		 */
1454 		cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1455 		deleting = cpumask_and(tmp->delmask, tmp->delmask,
1456 				       parent->subparts_cpus);
1457 
1458 		cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1459 		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1460 					parent->subparts_cpus);
1461 		/*
1462 		 * Empty cpumask is not allowed
1463 		 */
1464 		if (cpumask_empty(newmask)) {
1465 			part_error = PERR_CPUSEMPTY;
1466 		/*
1467 		 * Make partition invalid if parent's effective_cpus could
1468 		 * become empty and there are tasks in the parent.
1469 		 */
1470 		} else if (adding &&
1471 		    cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1472 		    !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1473 		    partition_is_populated(parent, cs)) {
1474 			part_error = PERR_NOCPUS;
1475 			adding = false;
1476 			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1477 					       parent->subparts_cpus);
1478 		}
1479 	} else {
1480 		/*
1481 		 * partcmd_update w/o newmask:
1482 		 *
1483 		 * delmask = cpus_allowed & parent->subparts_cpus
1484 		 * addmask = cpus_allowed & parent->cpus_allowed
1485 		 *			  & ~parent->subparts_cpus
1486 		 *
1487 		 * This gets invoked either due to a hotplug event or from
1488 		 * update_cpumasks_hier(). This can cause the state of a
1489 		 * partition root to transition from valid to invalid or vice
1490 		 * versa. So we still need to compute the addmask and delmask.
1491 		 * versa. So we still need to compute the addmask and delmask.
		 *
1492 		 * A partition error happens when:
1493 		 * 1) Cpuset is valid partition, but parent does not distribute
1494 		 *    out any CPUs.
1495 		 * 2) Parent has tasks and all its effective CPUs will have
1496 		 *    to be distributed out.
1497 		 */
1498 		cpumask_and(tmp->addmask, cs->cpus_allowed,
1499 					  parent->cpus_allowed);
1500 		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1501 					parent->subparts_cpus);
1502 
1503 		if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1504 		    (adding &&
1505 		     cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1506 		     partition_is_populated(parent, cs))) {
1507 			part_error = PERR_NOCPUS;
1508 			adding = false;
1509 		}
1510 
1511 		if (part_error && is_partition_valid(cs) &&
1512 		    parent->nr_subparts_cpus)
1513 			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1514 					       parent->subparts_cpus);
1515 	}
1516 	if (part_error)
1517 		WRITE_ONCE(cs->prs_err, part_error);
1518 
1519 	if (cmd == partcmd_update) {
1520 		/*
1521 		 * Check for possible transition between valid and invalid
1522 		 * partition root.
1523 		 */
1524 		switch (cs->partition_root_state) {
1525 		case PRS_ROOT:
1526 		case PRS_ISOLATED:
1527 			if (part_error)
1528 				new_prs = -old_prs;
1529 			break;
1530 		case PRS_INVALID_ROOT:
1531 		case PRS_INVALID_ISOLATED:
1532 			if (!part_error)
1533 				new_prs = -old_prs;
1534 			break;
1535 		}
1536 	}
1537 
1538 	if (!adding && !deleting && (new_prs == old_prs))
1539 		return 0;
1540 
1541 	/*
1542 	 * Transitioning between invalid to valid or vice versa may require
1543 	 * changing CS_CPU_EXCLUSIVE.
1544 	 */
1545 	if (old_prs != new_prs) {
1546 		int err = update_partition_exclusive(cs, new_prs);
1547 
1548 		if (err)
1549 			return err;
1550 	}
1551 
1552 	/*
1553 	 * Change the parent's subparts_cpus.
1554 	 * Newly added CPUs will be removed from effective_cpus and
1555 	 * newly deleted ones will be added back to effective_cpus.
1556 	 */
1557 	spin_lock_irq(&callback_lock);
1558 	if (adding) {
1559 		cpumask_or(parent->subparts_cpus,
1560 			   parent->subparts_cpus, tmp->addmask);
1561 		cpumask_andnot(parent->effective_cpus,
1562 			       parent->effective_cpus, tmp->addmask);
1563 	}
1564 	if (deleting) {
1565 		cpumask_andnot(parent->subparts_cpus,
1566 			       parent->subparts_cpus, tmp->delmask);
1567 		/*
1568 		 * Some of the CPUs in subparts_cpus might have been offlined.
1569 		 */
1570 		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1571 		cpumask_or(parent->effective_cpus,
1572 			   parent->effective_cpus, tmp->delmask);
1573 	}
1574 
1575 	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1576 
1577 	if (old_prs != new_prs)
1578 		cs->partition_root_state = new_prs;
1579 
1580 	spin_unlock_irq(&callback_lock);
1581 
1582 	if (adding || deleting) {
1583 		update_tasks_cpumask(parent, tmp->addmask);
1584 		if (parent->child_ecpus_count)
1585 			update_sibling_cpumasks(parent, cs, tmp);
1586 	}
1587 
1588 	/*
1589 	 * For partcmd_update without newmask, it is being called from
1590 	 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
1591 	 * Update the load balance flag and scheduling domain if
1592 	 * cpus_read_trylock() is successful.
1593 	 */
1594 	if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
1595 		update_partition_sd_lb(cs, old_prs);
1596 		cpus_read_unlock();
1597 	}
1598 
1599 	notify_partition_change(cs, old_prs);
1600 	return 0;
1601 }
1602 
1603 /*
1604  * update_cpumasks_hier() flags
1605  */
1606 #define HIER_CHECKALL		0x01	/* Check all cpusets with no skipping */
1607 #define HIER_NO_SD_REBUILD	0x02	/* Don't rebuild sched domains */
1608 
1609 /*
1610  * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1611  * @cs:  the cpuset to consider
1612  * @tmp: temp variables for calculating effective_cpus & partition setup
1613  * @flags: HIER_* flags defined above (HIER_CHECKALL: don't skip any descendants)
1614  *
1615  * When configured cpumask is changed, the effective cpumasks of this cpuset
1616  * and all its descendants need to be updated.
1617  *
1618  * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1619  *
1620  * Called with cpuset_mutex held
1621  */
1622 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1623 				 int flags)
1624 {
1625 	struct cpuset *cp;
1626 	struct cgroup_subsys_state *pos_css;
1627 	bool need_rebuild_sched_domains = false;
1628 	int old_prs, new_prs;
1629 
1630 	rcu_read_lock();
1631 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1632 		struct cpuset *parent = parent_cs(cp);
1633 		bool update_parent = false;
1634 
1635 		compute_effective_cpumask(tmp->new_cpus, cp, parent);
1636 
1637 		/*
1638 		 * If it becomes empty, inherit the effective mask of the
1639 		 * parent, which is guaranteed to have some CPUs unless
1640 		 * it is a partition root that has explicitly distributed
1641 		 * out all its CPUs.
1642 		 */
1643 		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1644 			if (is_partition_valid(cp) &&
1645 			    cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1646 				goto update_parent_subparts;
1647 
1648 			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1649 			if (!cp->use_parent_ecpus) {
1650 				cp->use_parent_ecpus = true;
1651 				parent->child_ecpus_count++;
1652 			}
1653 		} else if (cp->use_parent_ecpus) {
1654 			cp->use_parent_ecpus = false;
1655 			WARN_ON_ONCE(!parent->child_ecpus_count);
1656 			parent->child_ecpus_count--;
1657 		}
1658 
1659 		/*
1660 		 * Skip the whole subtree if
1661 		 * 1) the cpumask remains the same,
1662 		 * 2) it has no partition root state,
1663 		 * 3) the HIER_CHECKALL flag is not set, and
1664 		 * 4) on v2, its load balance state is the same as its parent's.
1665 		 */
1666 		if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
1667 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
1668 		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1669 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
1670 			pos_css = css_rightmost_descendant(pos_css);
1671 			continue;
1672 		}
1673 
1674 update_parent_subparts:
1675 		/*
1676 		 * update_parent_subparts_cpumask() should have been called
1677 		 * for cs already in update_cpumask(). We should also call
1678 		 * update_tasks_cpumask() again for tasks in the parent
1679 		 * cpuset if the parent's subparts_cpus changes.
1680 		 */
1681 		old_prs = new_prs = cp->partition_root_state;
1682 		if ((cp != cs) && old_prs) {
1683 			switch (parent->partition_root_state) {
1684 			case PRS_ROOT:
1685 			case PRS_ISOLATED:
1686 				update_parent = true;
1687 				break;
1688 
1689 			default:
1690 				/*
1691 				 * When parent is not a partition root or is
1692 				 * invalid, child partition roots become
1693 				 * invalid too.
1694 				 */
1695 				if (is_partition_valid(cp))
1696 					new_prs = -cp->partition_root_state;
1697 				WRITE_ONCE(cp->prs_err,
1698 					   is_partition_invalid(parent)
1699 					   ? PERR_INVPARENT : PERR_NOTPART);
1700 				break;
1701 			}
1702 		}
1703 
1704 		if (!css_tryget_online(&cp->css))
1705 			continue;
1706 		rcu_read_unlock();
1707 
1708 		if (update_parent) {
1709 			update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1710 						       tmp);
1711 			/*
1712 			 * The cpuset partition_root_state may become
1713 			 * invalid. Capture it.
1714 			 */
1715 			new_prs = cp->partition_root_state;
1716 		}
1717 
1718 		spin_lock_irq(&callback_lock);
1719 
1720 		if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1721 			/*
1722 			 * Put all active subparts_cpus back to effective_cpus.
1723 			 */
1724 			cpumask_or(tmp->new_cpus, tmp->new_cpus,
1725 				   cp->subparts_cpus);
1726 			cpumask_and(tmp->new_cpus, tmp->new_cpus,
1727 				   cpu_active_mask);
1728 			cp->nr_subparts_cpus = 0;
1729 			cpumask_clear(cp->subparts_cpus);
1730 		}
1731 
1732 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1733 		if (cp->nr_subparts_cpus) {
1734 			/*
1735 			 * Make sure that effective_cpus & subparts_cpus
1736 			 * are mutually exclusive.
1737 			 */
1738 			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1739 				       cp->subparts_cpus);
1740 		}
1741 
1742 		cp->partition_root_state = new_prs;
1743 		spin_unlock_irq(&callback_lock);
1744 
1745 		notify_partition_change(cp, old_prs);
1746 
1747 		WARN_ON(!is_in_v2_mode() &&
1748 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1749 
1750 		update_tasks_cpumask(cp, tmp->new_cpus);
1751 
1752 		/*
1753 		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
1754 		 * from parent if current cpuset isn't a valid partition root
1755 		 * and their load balance states differ.
1756 		 */
1757 		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1758 		    !is_partition_valid(cp) &&
1759 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
1760 			if (is_sched_load_balance(parent))
1761 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1762 			else
1763 				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1764 		}
1765 
1766 		/*
1767 		 * On legacy hierarchy, if the effective cpumask of any non-
1768 		 * empty cpuset is changed, we need to rebuild sched domains.
1769 		 * On default hierarchy, the cpuset needs to be a partition
1770 		 * root as well.
1771 		 */
1772 		if (!cpumask_empty(cp->cpus_allowed) &&
1773 		    is_sched_load_balance(cp) &&
1774 		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1775 		    is_partition_valid(cp)))
1776 			need_rebuild_sched_domains = true;
1777 
1778 		rcu_read_lock();
1779 		css_put(&cp->css);
1780 	}
1781 	rcu_read_unlock();
1782 
1783 	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
1784 		rebuild_sched_domains_locked();
1785 }
1786 
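/*
 * Illustrative sketch (not kernel code): on the default hierarchy the
 * per-descendant rule applied above reduces to
 *
 *	cpumask_and(new, cp->cpus_allowed, parent->effective_cpus);
 *	if (cpumask_empty(new))
 *		cpumask_copy(new, parent->effective_cpus);
 *
 * i.e. effective_cpus is the configured mask constrained by the
 * parent's effective mask, with an empty intersection falling back to
 * the parent's effective mask.
 */
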
1787 /**
1788  * update_sibling_cpumasks - Update siblings' cpumasks
1789  * @parent:  Parent cpuset
1790  * @cs:      Current cpuset
1791  * @tmp:     Temp variables
1792  */
1793 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1794 				    struct tmpmasks *tmp)
1795 {
1796 	struct cpuset *sibling;
1797 	struct cgroup_subsys_state *pos_css;
1798 
1799 	lockdep_assert_held(&cpuset_mutex);
1800 
1801 	/*
1802 	 * Check all its siblings and call update_cpumasks_hier()
1803 	 * if their use_parent_ecpus flag is set in order for them
1804 	 * to use the right effective_cpus value.
1805 	 *
1806 	 * The update_cpumasks_hier() function may sleep. So we have to
1807 	 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
1808 	 * flag is used to suppress rebuild of sched domains as the callers
1809 	 * will take care of that.
1810 	 */
1811 	rcu_read_lock();
1812 	cpuset_for_each_child(sibling, pos_css, parent) {
1813 		if (sibling == cs)
1814 			continue;
1815 		if (!sibling->use_parent_ecpus)
1816 			continue;
1817 		if (!css_tryget_online(&sibling->css))
1818 			continue;
1819 
1820 		rcu_read_unlock();
1821 		update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
1822 		rcu_read_lock();
1823 		css_put(&sibling->css);
1824 	}
1825 	rcu_read_unlock();
1826 }
1827 
1828 /**
1829  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1830  * @cs: the cpuset to consider
1831  * @trialcs: trial cpuset
1832  * @buf: buffer of cpu numbers written to this cpuset
1833  */
1834 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1835 			  const char *buf)
1836 {
1837 	int retval;
1838 	struct tmpmasks tmp;
1839 	bool invalidate = false;
1840 	int old_prs = cs->partition_root_state;
1841 
1842 	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1843 	if (cs == &top_cpuset)
1844 		return -EACCES;
1845 
1846 	/*
1847 	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1848 	 * Since cpulist_parse() fails on an empty mask, we special case
1849 	 * that parsing.  The validate_change() call ensures that cpusets
1850 	 * with tasks have cpus.
1851 	 */
1852 	if (!*buf) {
1853 		cpumask_clear(trialcs->cpus_allowed);
1854 	} else {
1855 		retval = cpulist_parse(buf, trialcs->cpus_allowed);
1856 		if (retval < 0)
1857 			return retval;
1858 
1859 		if (!cpumask_subset(trialcs->cpus_allowed,
1860 				    top_cpuset.cpus_allowed))
1861 			return -EINVAL;
1862 	}
1863 
1864 	/* Nothing to do if the cpus didn't change */
1865 	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1866 		return 0;
1867 
1868 	if (alloc_cpumasks(NULL, &tmp))
1869 		return -ENOMEM;
1870 
1871 	retval = validate_change(cs, trialcs);
1872 
1873 	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1874 		struct cpuset *cp, *parent;
1875 		struct cgroup_subsys_state *css;
1876 
1877 		/*
1878 		 * The -EINVAL error code indicates that the partition sibling
1879 		 * CPU exclusivity rule has been violated. We still allow
1880 		 * the cpumask change to proceed while invalidating the
1881 		 * partition. However, any conflicting sibling partitions
1882 		 * have to be marked as invalid too.
1883 		 */
1884 		invalidate = true;
1885 		rcu_read_lock();
1886 		parent = parent_cs(cs);
1887 		cpuset_for_each_child(cp, css, parent)
1888 			if (is_partition_valid(cp) &&
1889 			    cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1890 				rcu_read_unlock();
1891 				update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1892 				rcu_read_lock();
1893 			}
1894 		rcu_read_unlock();
1895 		retval = 0;
1896 	}
1897 	if (retval < 0)
1898 		goto out_free;
1899 
1900 	if (cs->partition_root_state) {
1901 		if (invalidate)
1902 			update_parent_subparts_cpumask(cs, partcmd_invalidate,
1903 						       NULL, &tmp);
1904 		else
1905 			update_parent_subparts_cpumask(cs, partcmd_update,
1906 						trialcs->cpus_allowed, &tmp);
1907 	}
1908 
1909 	compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1910 				  parent_cs(cs));
1911 	spin_lock_irq(&callback_lock);
1912 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1913 
1914 	/*
1915 	 * Make sure that subparts_cpus, if not empty, is a subset of
1916 	 * cpus_allowed. Clear subparts_cpus if partition not valid or
1917 	 * empty effective cpus with tasks.
1918 	 */
1919 	if (cs->nr_subparts_cpus) {
1920 		if (!is_partition_valid(cs) ||
1921 		   (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1922 		    partition_is_populated(cs, NULL))) {
1923 			cs->nr_subparts_cpus = 0;
1924 			cpumask_clear(cs->subparts_cpus);
1925 		} else {
1926 			cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1927 				    cs->cpus_allowed);
1928 			cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1929 		}
1930 	}
1931 	spin_unlock_irq(&callback_lock);
1932 
1933 	/* effective_cpus will be updated here */
1934 	update_cpumasks_hier(cs, &tmp, 0);
1935 
1936 	if (cs->partition_root_state) {
1937 		struct cpuset *parent = parent_cs(cs);
1938 
1939 		/*
1940 		 * For partition root, update the cpumasks of sibling
1941 		 * cpusets if they use parent's effective_cpus.
1942 		 */
1943 		if (parent->child_ecpus_count)
1944 			update_sibling_cpumasks(parent, cs, &tmp);
1945 
1946 		/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
1947 		update_partition_sd_lb(cs, old_prs);
1948 	}
1949 out_free:
1950 	free_cpumasks(NULL, &tmp);
1951 	return retval;
1952 }
1953 
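/*
 * Example (sketch, illustrative values): a write of "0-3,8" to
 * cpuset.cpus reaches update_cpumask() via cpuset_write_resmask()
 * below; cpulist_parse() turns the string into a cpumask:
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (cpulist_parse("0-3,8", mask) == 0)
 *		pr_info("parsed %*pbl\n", cpumask_pr_args(mask));
 *	free_cpumask_var(mask);
 */
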
1954 /*
1955  * Migrate memory region from one set of nodes to another.  This is
1956  * performed asynchronously as it can be called from process migration path
1957  * holding locks involved in process management.  All mm migrations are
1958  * performed in the queued order and can be waited for by flushing
1959  * cpuset_migrate_mm_wq.
1960  */
1961 
1962 struct cpuset_migrate_mm_work {
1963 	struct work_struct	work;
1964 	struct mm_struct	*mm;
1965 	nodemask_t		from;
1966 	nodemask_t		to;
1967 };
1968 
1969 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1970 {
1971 	struct cpuset_migrate_mm_work *mwork =
1972 		container_of(work, struct cpuset_migrate_mm_work, work);
1973 
1974 	/* on a wq worker, no need to worry about %current's mems_allowed */
1975 	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1976 	mmput(mwork->mm);
1977 	kfree(mwork);
1978 }
1979 
1980 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1981 							const nodemask_t *to)
1982 {
1983 	struct cpuset_migrate_mm_work *mwork;
1984 
1985 	if (nodes_equal(*from, *to)) {
1986 		mmput(mm);
1987 		return;
1988 	}
1989 
1990 	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1991 	if (mwork) {
1992 		mwork->mm = mm;
1993 		mwork->from = *from;
1994 		mwork->to = *to;
1995 		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1996 		queue_work(cpuset_migrate_mm_wq, &mwork->work);
1997 	} else {
1998 		mmput(mm);
1999 	}
2000 }
2001 
2002 static void cpuset_post_attach(void)
2003 {
2004 	flush_workqueue(cpuset_migrate_mm_wq);
2005 }
2006 
2007 /*
2008  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2009  * @tsk: the task to change
2010  * @newmems: new set of nodes for the task's mems_allowed
2011  *
2012  * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2013  * and rebind the task's mempolicy, if it has one. If the task is allocating in
2014  * parallel, it might temporarily see an empty intersection, which results in
2015  * a seqlock check and retry before OOM or allocation failure.
2016  */
2017 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2018 					nodemask_t *newmems)
2019 {
2020 	task_lock(tsk);
2021 
2022 	local_irq_disable();
2023 	write_seqcount_begin(&tsk->mems_allowed_seq);
2024 
2025 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2026 	mpol_rebind_task(tsk, newmems);
2027 	tsk->mems_allowed = *newmems;
2028 
2029 	write_seqcount_end(&tsk->mems_allowed_seq);
2030 	local_irq_enable();
2031 
2032 	task_unlock(tsk);
2033 }
2034 
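/*
 * Reader side of mems_allowed_seq (sketch): allocator paths sample
 * mems_allowed with the read_mems_allowed_begin()/_retry() helpers
 * from <linux/cpuset.h> and retry if it changed underneath them:
 *
 *	unsigned int seq;
 *	nodemask_t nodes;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		nodes = current->mems_allowed;
 *	} while (read_mems_allowed_retry(seq));
 */
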
2035 static void *cpuset_being_rebound;
2036 
2037 /**
2038  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2039  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2040  *
2041  * Iterate through each task of @cs updating its mems_allowed to the
2042  * cpuset's effective_mems.  As this function is called with cpuset_mutex held,
2043  * cpuset membership stays stable.
2044  */
2045 static void update_tasks_nodemask(struct cpuset *cs)
2046 {
2047 	static nodemask_t newmems;	/* protected by cpuset_mutex */
2048 	struct css_task_iter it;
2049 	struct task_struct *task;
2050 
2051 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2052 
2053 	guarantee_online_mems(cs, &newmems);
2054 
2055 	/*
2056 	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2057 	 * take while holding tasklist_lock.  Forks can happen - the
2058 	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2059 	 * and rebind their vma mempolicies too.  Because we still hold
2060 	 * the global cpuset_mutex, we know that no other rebind effort
2061 	 * will be contending for the global variable cpuset_being_rebound.
2062 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2063 	 * is idempotent.  Also migrate pages in each mm to new nodes.
2064 	 */
2065 	css_task_iter_start(&cs->css, 0, &it);
2066 	while ((task = css_task_iter_next(&it))) {
2067 		struct mm_struct *mm;
2068 		bool migrate;
2069 
2070 		cpuset_change_task_nodemask(task, &newmems);
2071 
2072 		mm = get_task_mm(task);
2073 		if (!mm)
2074 			continue;
2075 
2076 		migrate = is_memory_migrate(cs);
2077 
2078 		mpol_rebind_mm(mm, &cs->mems_allowed);
2079 		if (migrate)
2080 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2081 		else
2082 			mmput(mm);
2083 	}
2084 	css_task_iter_end(&it);
2085 
2086 	/*
2087 	 * All the tasks' nodemasks have been updated, update
2088 	 * cs->old_mems_allowed.
2089 	 */
2090 	cs->old_mems_allowed = newmems;
2091 
2092 	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2093 	cpuset_being_rebound = NULL;
2094 }
2095 
2096 /*
2097  * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2098  * @cs: the cpuset to consider
2099  * @new_mems: a temp variable for calculating new effective_mems
2100  *
2101  * When configured nodemask is changed, the effective nodemasks of this cpuset
2102  * and all its descendants need to be updated.
2103  *
2104  * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2105  *
2106  * Called with cpuset_mutex held
2107  */
2108 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2109 {
2110 	struct cpuset *cp;
2111 	struct cgroup_subsys_state *pos_css;
2112 
2113 	rcu_read_lock();
2114 	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2115 		struct cpuset *parent = parent_cs(cp);
2116 
2117 		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2118 
2119 		/*
2120 		 * If it becomes empty, inherit the effective mask of the
2121 		 * parent, which is guaranteed to have some MEMs.
2122 		 */
2123 		if (is_in_v2_mode() && nodes_empty(*new_mems))
2124 			*new_mems = parent->effective_mems;
2125 
2126 		/* Skip the whole subtree if the nodemask remains the same. */
2127 		if (nodes_equal(*new_mems, cp->effective_mems)) {
2128 			pos_css = css_rightmost_descendant(pos_css);
2129 			continue;
2130 		}
2131 
2132 		if (!css_tryget_online(&cp->css))
2133 			continue;
2134 		rcu_read_unlock();
2135 
2136 		spin_lock_irq(&callback_lock);
2137 		cp->effective_mems = *new_mems;
2138 		spin_unlock_irq(&callback_lock);
2139 
2140 		WARN_ON(!is_in_v2_mode() &&
2141 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2142 
2143 		update_tasks_nodemask(cp);
2144 
2145 		rcu_read_lock();
2146 		css_put(&cp->css);
2147 	}
2148 	rcu_read_unlock();
2149 }
2150 
2151 /*
2152  * Handle user request to change the 'mems' memory placement
2153  * of a cpuset.  Needs to validate the request, update the
2154  * cpuset's mems_allowed, and for each task in the cpuset,
2155  * update mems_allowed, rebind the task's mempolicy and any vma
2156  * mempolicies, and, if the cpuset is marked 'memory_migrate',
2157  * migrate the task's pages to the new memory.
2158  *
2159  * Call with cpuset_mutex held. May take callback_lock during call.
2160  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2161  * lock each such task's mm->mmap_lock, scan its vma's and rebind
2162  * their mempolicies to the cpuset's new mems_allowed.
2163  */
2164 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2165 			   const char *buf)
2166 {
2167 	int retval;
2168 
2169 	/*
2170 	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2171 	 * it's read-only
2172 	 */
2173 	if (cs == &top_cpuset) {
2174 		retval = -EACCES;
2175 		goto done;
2176 	}
2177 
2178 	/*
2179 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2180 	 * Since nodelist_parse() fails on an empty mask, we special case
2181 	 * that parsing.  The validate_change() call ensures that cpusets
2182 	 * with tasks have memory.
2183 	 */
2184 	if (!*buf) {
2185 		nodes_clear(trialcs->mems_allowed);
2186 	} else {
2187 		retval = nodelist_parse(buf, trialcs->mems_allowed);
2188 		if (retval < 0)
2189 			goto done;
2190 
2191 		if (!nodes_subset(trialcs->mems_allowed,
2192 				  top_cpuset.mems_allowed)) {
2193 			retval = -EINVAL;
2194 			goto done;
2195 		}
2196 	}
2197 
2198 	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2199 		retval = 0;		/* Too easy - nothing to do */
2200 		goto done;
2201 	}
2202 	retval = validate_change(cs, trialcs);
2203 	if (retval < 0)
2204 		goto done;
2205 
2206 	check_insane_mems_config(&trialcs->mems_allowed);
2207 
2208 	spin_lock_irq(&callback_lock);
2209 	cs->mems_allowed = trialcs->mems_allowed;
2210 	spin_unlock_irq(&callback_lock);
2211 
2212 	/* use trialcs->mems_allowed as a temp variable */
2213 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2214 done:
2215 	return retval;
2216 }
2217 
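/*
 * Example (sketch, illustrative values): a write of "0,2-3" to
 * cpuset.mems is parsed by nodelist_parse() above into a nodemask:
 *
 *	nodemask_t mask;
 *
 *	if (nodelist_parse("0,2-3", mask) == 0)
 *		pr_info("parsed %*pbl\n", nodemask_pr_args(&mask));
 */
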
2218 bool current_cpuset_is_being_rebound(void)
2219 {
2220 	bool ret;
2221 
2222 	rcu_read_lock();
2223 	ret = task_cs(current) == cpuset_being_rebound;
2224 	rcu_read_unlock();
2225 
2226 	return ret;
2227 }
2228 
2229 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2230 {
2231 #ifdef CONFIG_SMP
2232 	if (val < -1 || val >= sched_domain_level_max)
2233 		return -EINVAL;
2234 #endif
2235 
2236 	if (val != cs->relax_domain_level) {
2237 		cs->relax_domain_level = val;
2238 		if (!cpumask_empty(cs->cpus_allowed) &&
2239 		    is_sched_load_balance(cs))
2240 			rebuild_sched_domains_locked();
2241 	}
2242 
2243 	return 0;
2244 }
2245 
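/*
 * Usage sketch: the level is set from userspace by writing a small
 * integer to the v1 control file, e.g.
 *
 *	echo 2 > /sys/fs/cgroup/cpuset/A/cpuset.sched_relax_domain_level
 *
 * (path assumes the conventional v1 mount point).  -1 keeps the system
 * default; values >= sched_domain_level_max are rejected with -EINVAL
 * by the check above.
 */
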
2246 /**
2247  * update_tasks_flags - update the spread flags of tasks in the cpuset.
2248  * @cs: the cpuset in which each task's spread flags needs to be changed
2249  *
2250  * Iterate through each task of @cs updating its spread flags.  As this
2251  * function is called with cpuset_mutex held, cpuset membership stays
2252  * stable.
2253  */
2254 static void update_tasks_flags(struct cpuset *cs)
2255 {
2256 	struct css_task_iter it;
2257 	struct task_struct *task;
2258 
2259 	css_task_iter_start(&cs->css, 0, &it);
2260 	while ((task = css_task_iter_next(&it)))
2261 		cpuset_update_task_spread_flags(cs, task);
2262 	css_task_iter_end(&it);
2263 }
2264 
2265 /*
2266  * update_flag - read a 0 or a 1 in a file and update associated flag
2267  * bit:		the bit to update (see cpuset_flagbits_t)
2268  * cs:		the cpuset to update
2269  * turning_on: 	whether the flag is being set or cleared
2270  *
2271  * Call with cpuset_mutex held.
2272  */
2273 
2274 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2275 		       int turning_on)
2276 {
2277 	struct cpuset *trialcs;
2278 	int balance_flag_changed;
2279 	int spread_flag_changed;
2280 	int err;
2281 
2282 	trialcs = alloc_trial_cpuset(cs);
2283 	if (!trialcs)
2284 		return -ENOMEM;
2285 
2286 	if (turning_on)
2287 		set_bit(bit, &trialcs->flags);
2288 	else
2289 		clear_bit(bit, &trialcs->flags);
2290 
2291 	err = validate_change(cs, trialcs);
2292 	if (err < 0)
2293 		goto out;
2294 
2295 	balance_flag_changed = (is_sched_load_balance(cs) !=
2296 				is_sched_load_balance(trialcs));
2297 
2298 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2299 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2300 
2301 	spin_lock_irq(&callback_lock);
2302 	cs->flags = trialcs->flags;
2303 	spin_unlock_irq(&callback_lock);
2304 
2305 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2306 		rebuild_sched_domains_locked();
2307 
2308 	if (spread_flag_changed)
2309 		update_tasks_flags(cs);
2310 out:
2311 	free_cpuset(trialcs);
2312 	return err;
2313 }
2314 
2315 /**
2316  * update_prstate - update partition_root_state
2317  * @cs: the cpuset to update
2318  * @new_prs: new partition root state
2319  * Return: 0 if successful, != 0 if error
2320  *
2321  * Call with cpuset_mutex held.
2322  */
2323 static int update_prstate(struct cpuset *cs, int new_prs)
2324 {
2325 	int err = PERR_NONE, old_prs = cs->partition_root_state;
2326 	struct cpuset *parent = parent_cs(cs);
2327 	struct tmpmasks tmpmask;
2328 
2329 	if (old_prs == new_prs)
2330 		return 0;
2331 
2332 	/*
2333 	 * For a previously invalid partition root, leave it as
2334 	 * invalid if new_prs is not "member".
2335 	 */
2336 	if (new_prs && is_prs_invalid(old_prs)) {
2337 		cs->partition_root_state = -new_prs;
2338 		return 0;
2339 	}
2340 
2341 	if (alloc_cpumasks(NULL, &tmpmask))
2342 		return -ENOMEM;
2343 
2344 	err = update_partition_exclusive(cs, new_prs);
2345 	if (err)
2346 		goto out;
2347 
2348 	if (!old_prs) {
2349 		/*
2350 		 * cpus_allowed cannot be empty.
2351 		 */
2352 		if (cpumask_empty(cs->cpus_allowed)) {
2353 			err = PERR_CPUSEMPTY;
2354 			goto out;
2355 		}
2356 
2357 		err = update_parent_subparts_cpumask(cs, partcmd_enable,
2358 						     NULL, &tmpmask);
2359 	} else if (old_prs && new_prs) {
2360 		/*
2361 		 * A change in load balance state only, no change in cpumasks.
2362 		 */
2363 		;
2364 	} else {
2365 		/*
2366 		 * Switching back to member is always allowed even if it
2367 		 * disables child partitions.
2368 		 */
2369 		update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2370 					       &tmpmask);
2371 
2372 		/*
2373 		 * If there are child partitions, they will all become invalid.
2374 		 */
2375 		if (unlikely(cs->nr_subparts_cpus)) {
2376 			spin_lock_irq(&callback_lock);
2377 			cs->nr_subparts_cpus = 0;
2378 			cpumask_clear(cs->subparts_cpus);
2379 			compute_effective_cpumask(cs->effective_cpus, cs, parent);
2380 			spin_unlock_irq(&callback_lock);
2381 		}
2382 	}
2383 out:
2384 	/*
2385 	 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2386 	 * happens.
2387 	 */
2388 	if (err) {
2389 		new_prs = -new_prs;
2390 		update_partition_exclusive(cs, new_prs);
2391 	}
2392 
2393 	spin_lock_irq(&callback_lock);
2394 	cs->partition_root_state = new_prs;
2395 	WRITE_ONCE(cs->prs_err, err);
2396 	spin_unlock_irq(&callback_lock);
2397 
2398 	/*
2399 	 * Update child cpusets, if present.
2400 	 * Force update if switching back to member.
2401 	 */
2402 	if (!list_empty(&cs->css.children))
2403 		update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
2404 
2405 	/* Update sched domains and load balance flag */
2406 	update_partition_sd_lb(cs, old_prs);
2407 
2408 	notify_partition_change(cs, old_prs);
2409 	free_cpumasks(NULL, &tmpmask);
2410 	return 0;
2411 }
2412 
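/*
 * Note on the encoding used above: an invalid partition state is
 * stored as the negative of the corresponding valid one, which is why
 * "new_prs = -new_prs" marks a partition invalid and why a previously
 * invalid root is kept invalid with "-new_prs".
 */
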
2413 /*
2414  * Frequency meter - How fast is some event occurring?
2415  *
2416  * These routines manage a digitally filtered, constant time based,
2417  * event frequency meter.  There are four routines:
2418  *   fmeter_init() - initialize a frequency meter.
2419  *   fmeter_markevent() - called each time the event happens.
2420  *   fmeter_getrate() - returns the recent rate of such events.
2421  *   fmeter_update() - internal routine used to update fmeter.
2422  *
2423  * A common data structure is passed to each of these routines,
2424  * which is used to keep track of the state required to manage the
2425  * frequency meter and its digital filter.
2426  *
2427  * The filter works on the number of events marked per unit time.
2428  * The filter is single-pole low-pass recursive (IIR).  The time unit
2429  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2430  * simulate 3 decimal digits of precision (multiplied by 1000).
2431  *
2432  * With an FM_COEF of 933, and a time base of 1 second, the filter
2433  * has a half-life of 10 seconds, meaning that if the events quit
2434  * happening, then the rate returned from the fmeter_getrate()
2435  * will be cut in half each 10 seconds, until it converges to zero.
2436  *
2437  * It is not worth doing a real infinitely recursive filter.  If more
2438  * than FM_MAXTICKS ticks have elapsed since the last filter event,
2439  * just compute FM_MAXTICKS ticks worth, by which point the level
2440  * will be stable.
2441  *
2442  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2443  * arithmetic overflow in the fmeter_update() routine.
2444  *
2445  * Given the simple 32 bit integer arithmetic used, this meter works
2446  * best for reporting rates between one per millisecond (msec) and
2447  * one per 32 (approx) seconds.  At constant rates faster than one
2448  * per msec it maxes out at values just under 1,000,000.  At constant
2449  * rates between one per msec, and one per second it will stabilize
2450  * to a value N*1000, where N is the rate of events per second.
2451  * At constant rates between one per second and one per 32 seconds,
2452  * it will be choppy, moving up on the seconds that have an event,
2453  * and then decaying until the next event.  At rates slower than
2454  * about one in 32 seconds, it decays all the way back to zero between
2455  * each event.
2456  */
2457 
2458 #define FM_COEF 933		/* coefficient for half-life of 10 secs */
2459 #define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
2460 #define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
2461 #define FM_SCALE 1000		/* faux fixed point scale */
2462 
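/*
 * Worked example (illustrative numbers): with one fmeter_markevent()
 * per second, each fmeter_update() first decays val by FM_COEF/FM_SCALE
 * (val = 933 * val / 1000), then adds the pending count scaled by
 * (FM_SCALE - FM_COEF)/FM_SCALE, i.e. (1000 - 933) * 1000 / 1000 = 67.
 * The steady state solves val = 0.933 * val + 67, i.e. val ~= 1000,
 * matching the N * 1000 figure quoted above for N = 1 event/sec.
 */
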
2463 /* Initialize a frequency meter */
2464 static void fmeter_init(struct fmeter *fmp)
2465 {
2466 	fmp->cnt = 0;
2467 	fmp->val = 0;
2468 	fmp->time = 0;
2469 	spin_lock_init(&fmp->lock);
2470 }
2471 
2472 /* Internal meter update - process cnt events and update value */
2473 static void fmeter_update(struct fmeter *fmp)
2474 {
2475 	time64_t now;
2476 	u32 ticks;
2477 
2478 	now = ktime_get_seconds();
2479 	ticks = now - fmp->time;
2480 
2481 	if (ticks == 0)
2482 		return;
2483 
2484 	ticks = min(FM_MAXTICKS, ticks);
2485 	while (ticks-- > 0)
2486 		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2487 	fmp->time = now;
2488 
2489 	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2490 	fmp->cnt = 0;
2491 }
2492 
2493 /* Process any previous ticks, then bump cnt by one (times scale). */
2494 static void fmeter_markevent(struct fmeter *fmp)
2495 {
2496 	spin_lock(&fmp->lock);
2497 	fmeter_update(fmp);
2498 	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2499 	spin_unlock(&fmp->lock);
2500 }
2501 
2502 /* Process any previous ticks, then return current value. */
2503 static int fmeter_getrate(struct fmeter *fmp)
2504 {
2505 	int val;
2506 
2507 	spin_lock(&fmp->lock);
2508 	fmeter_update(fmp);
2509 	val = fmp->val;
2510 	spin_unlock(&fmp->lock);
2511 	return val;
2512 }
2513 
2514 static struct cpuset *cpuset_attach_old_cs;
2515 
2516 /*
2517  * Check to see if a cpuset can accept a new task
2518  * For v1, cpus_allowed and mems_allowed can't be empty.
2519  * For v2, effective_cpus can't be empty.
2520  * Note that in v1, effective_cpus = cpus_allowed.
2521  */
2522 static int cpuset_can_attach_check(struct cpuset *cs)
2523 {
2524 	if (cpumask_empty(cs->effective_cpus) ||
2525 	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2526 		return -ENOSPC;
2527 	return 0;
2528 }
2529 
2530 static void reset_migrate_dl_data(struct cpuset *cs)
2531 {
2532 	cs->nr_migrate_dl_tasks = 0;
2533 	cs->sum_migrate_dl_bw = 0;
2534 }
2535 
2536 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2537 static int cpuset_can_attach(struct cgroup_taskset *tset)
2538 {
2539 	struct cgroup_subsys_state *css;
2540 	struct cpuset *cs, *oldcs;
2541 	struct task_struct *task;
2542 	bool cpus_updated, mems_updated;
2543 	int ret;
2544 
2545 	/* used later by cpuset_attach() */
2546 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2547 	oldcs = cpuset_attach_old_cs;
2548 	cs = css_cs(css);
2549 
2550 	mutex_lock(&cpuset_mutex);
2551 
2552 	/* Check to see if task is allowed in the cpuset */
2553 	ret = cpuset_can_attach_check(cs);
2554 	if (ret)
2555 		goto out_unlock;
2556 
2557 	cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2558 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2559 
2560 	cgroup_taskset_for_each(task, css, tset) {
2561 		ret = task_can_attach(task);
2562 		if (ret)
2563 			goto out_unlock;
2564 
2565 		/*
2566 		 * Skip the rights-over-task check in v2 when nothing changes;
2567 		 * migration permission derives from hierarchy ownership in
2568 		 * cgroup_procs_write_permission().
2569 		 */
2570 		if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2571 		    (cpus_updated || mems_updated)) {
2572 			ret = security_task_setscheduler(task);
2573 			if (ret)
2574 				goto out_unlock;
2575 		}
2576 
2577 		if (dl_task(task)) {
2578 			cs->nr_migrate_dl_tasks++;
2579 			cs->sum_migrate_dl_bw += task->dl.dl_bw;
2580 		}
2581 	}
2582 
2583 	if (!cs->nr_migrate_dl_tasks)
2584 		goto out_success;
2585 
2586 	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2587 		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2588 
2589 		if (unlikely(cpu >= nr_cpu_ids)) {
2590 			reset_migrate_dl_data(cs);
2591 			ret = -EINVAL;
2592 			goto out_unlock;
2593 		}
2594 
2595 		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2596 		if (ret) {
2597 			reset_migrate_dl_data(cs);
2598 			goto out_unlock;
2599 		}
2600 	}
2601 
2602 out_success:
2603 	/*
2604 	 * Mark that an attach is in progress.  This makes validate_change() fail
2605 	 * changes which zero cpus/mems_allowed.
2606 	 */
2607 	cs->attach_in_progress++;
2608 out_unlock:
2609 	mutex_unlock(&cpuset_mutex);
2610 	return ret;
2611 }
2612 
2613 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2614 {
2615 	struct cgroup_subsys_state *css;
2616 	struct cpuset *cs;
2617 
2618 	cgroup_taskset_first(tset, &css);
2619 	cs = css_cs(css);
2620 
2621 	mutex_lock(&cpuset_mutex);
2622 	cs->attach_in_progress--;
2623 	if (!cs->attach_in_progress)
2624 		wake_up(&cpuset_attach_wq);
2625 
2626 	if (cs->nr_migrate_dl_tasks) {
2627 		int cpu = cpumask_any(cs->effective_cpus);
2628 
2629 		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
2630 		reset_migrate_dl_data(cs);
2631 	}
2632 
2633 	mutex_unlock(&cpuset_mutex);
2634 }
2635 
2636 /*
2637  * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
2638  * but we can't allocate it dynamically there.  Define it as a global
2639  * variable and allocate it in cpuset_init().
2640  */
2641 static cpumask_var_t cpus_attach;
2642 static nodemask_t cpuset_attach_nodemask_to;
2643 
2644 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2645 {
2646 	lockdep_assert_held(&cpuset_mutex);
2647 
2648 	if (cs != &top_cpuset)
2649 		guarantee_online_cpus(task, cpus_attach);
2650 	else
2651 		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
2652 			       cs->subparts_cpus);
2653 	/*
2654 	 * can_attach beforehand should guarantee that this doesn't
2655 	 * fail.  TODO: have a better way to handle failure here
2656 	 */
2657 	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2658 
2659 	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2660 	cpuset_update_task_spread_flags(cs, task);
2661 }
2662 
2663 static void cpuset_attach(struct cgroup_taskset *tset)
2664 {
2665 	struct task_struct *task;
2666 	struct task_struct *leader;
2667 	struct cgroup_subsys_state *css;
2668 	struct cpuset *cs;
2669 	struct cpuset *oldcs = cpuset_attach_old_cs;
2670 	bool cpus_updated, mems_updated;
2671 
2672 	cgroup_taskset_first(tset, &css);
2673 	cs = css_cs(css);
2674 
2675 	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
2676 	mutex_lock(&cpuset_mutex);
2677 	cpus_updated = !cpumask_equal(cs->effective_cpus,
2678 				      oldcs->effective_cpus);
2679 	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2680 
2681 	/*
2682 	 * In the default hierarchy, enabling cpuset in the child cgroups
2683 	 * will trigger a number of cpuset_attach() calls with no change
2684 	 * in effective cpus and mems. In that case, we can optimize out
2685 	 * by skipping the task iteration and update.
2686 	 */
2687 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2688 	    !cpus_updated && !mems_updated) {
2689 		cpuset_attach_nodemask_to = cs->effective_mems;
2690 		goto out;
2691 	}
2692 
2693 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2694 
2695 	cgroup_taskset_for_each(task, css, tset)
2696 		cpuset_attach_task(cs, task);
2697 
2698 	/*
2699 	 * Change mm for all threadgroup leaders. This is expensive and may
2700 	 * sleep, so it should be moved outside the migration path proper. Skip it
2701 	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
2702 	 * not set.
2703 	 */
2704 	cpuset_attach_nodemask_to = cs->effective_mems;
2705 	if (!is_memory_migrate(cs) && !mems_updated)
2706 		goto out;
2707 
2708 	cgroup_taskset_for_each_leader(leader, css, tset) {
2709 		struct mm_struct *mm = get_task_mm(leader);
2710 
2711 		if (mm) {
2712 			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2713 
2714 			/*
2715 			 * old_mems_allowed is the same as mems_allowed
2716 			 * here, except if this task is being moved
2717 			 * automatically due to hotplug.  In that case
2718 			 * @mems_allowed has been updated and is empty, so
2719 			 * @old_mems_allowed is the right nodemask that we
2720 			 * migrate the mm from.
2721 			 */
2722 			if (is_memory_migrate(cs))
2723 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2724 						  &cpuset_attach_nodemask_to);
2725 			else
2726 				mmput(mm);
2727 		}
2728 	}
2729 
2730 out:
2731 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
2732 
2733 	if (cs->nr_migrate_dl_tasks) {
2734 		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
2735 		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
2736 		reset_migrate_dl_data(cs);
2737 	}
2738 
2739 	cs->attach_in_progress--;
2740 	if (!cs->attach_in_progress)
2741 		wake_up(&cpuset_attach_wq);
2742 
2743 	mutex_unlock(&cpuset_mutex);
2744 }
2745 
2746 /* The various types of files and directories in a cpuset file system */
2747 
2748 typedef enum {
2749 	FILE_MEMORY_MIGRATE,
2750 	FILE_CPULIST,
2751 	FILE_MEMLIST,
2752 	FILE_EFFECTIVE_CPULIST,
2753 	FILE_EFFECTIVE_MEMLIST,
2754 	FILE_SUBPARTS_CPULIST,
2755 	FILE_CPU_EXCLUSIVE,
2756 	FILE_MEM_EXCLUSIVE,
2757 	FILE_MEM_HARDWALL,
2758 	FILE_SCHED_LOAD_BALANCE,
2759 	FILE_PARTITION_ROOT,
2760 	FILE_SCHED_RELAX_DOMAIN_LEVEL,
2761 	FILE_MEMORY_PRESSURE_ENABLED,
2762 	FILE_MEMORY_PRESSURE,
2763 	FILE_SPREAD_PAGE,
2764 	FILE_SPREAD_SLAB,
2765 } cpuset_filetype_t;
2766 
2767 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2768 			    u64 val)
2769 {
2770 	struct cpuset *cs = css_cs(css);
2771 	cpuset_filetype_t type = cft->private;
2772 	int retval = 0;
2773 
2774 	cpus_read_lock();
2775 	mutex_lock(&cpuset_mutex);
2776 	if (!is_cpuset_online(cs)) {
2777 		retval = -ENODEV;
2778 		goto out_unlock;
2779 	}
2780 
2781 	switch (type) {
2782 	case FILE_CPU_EXCLUSIVE:
2783 		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2784 		break;
2785 	case FILE_MEM_EXCLUSIVE:
2786 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2787 		break;
2788 	case FILE_MEM_HARDWALL:
2789 		retval = update_flag(CS_MEM_HARDWALL, cs, val);
2790 		break;
2791 	case FILE_SCHED_LOAD_BALANCE:
2792 		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2793 		break;
2794 	case FILE_MEMORY_MIGRATE:
2795 		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2796 		break;
2797 	case FILE_MEMORY_PRESSURE_ENABLED:
2798 		cpuset_memory_pressure_enabled = !!val;
2799 		break;
2800 	case FILE_SPREAD_PAGE:
2801 		retval = update_flag(CS_SPREAD_PAGE, cs, val);
2802 		break;
2803 	case FILE_SPREAD_SLAB:
2804 		retval = update_flag(CS_SPREAD_SLAB, cs, val);
2805 		break;
2806 	default:
2807 		retval = -EINVAL;
2808 		break;
2809 	}
2810 out_unlock:
2811 	mutex_unlock(&cpuset_mutex);
2812 	cpus_read_unlock();
2813 	return retval;
2814 }
2815 
2816 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2817 			    s64 val)
2818 {
2819 	struct cpuset *cs = css_cs(css);
2820 	cpuset_filetype_t type = cft->private;
2821 	int retval = -ENODEV;
2822 
2823 	cpus_read_lock();
2824 	mutex_lock(&cpuset_mutex);
2825 	if (!is_cpuset_online(cs))
2826 		goto out_unlock;
2827 
2828 	switch (type) {
2829 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2830 		retval = update_relax_domain_level(cs, val);
2831 		break;
2832 	default:
2833 		retval = -EINVAL;
2834 		break;
2835 	}
2836 out_unlock:
2837 	mutex_unlock(&cpuset_mutex);
2838 	cpus_read_unlock();
2839 	return retval;
2840 }
2841 
2842 /*
2843  * Common handling for a write to a "cpus" or "mems" file.
2844  */
2845 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2846 				    char *buf, size_t nbytes, loff_t off)
2847 {
2848 	struct cpuset *cs = css_cs(of_css(of));
2849 	struct cpuset *trialcs;
2850 	int retval = -ENODEV;
2851 
2852 	buf = strstrip(buf);
2853 
2854 	/*
2855 	 * CPU or memory hotunplug may leave @cs w/o any execution
2856 	 * resources, in which case the hotplug code asynchronously updates
2857 	 * configuration and transfers all tasks to the nearest ancestor
2858 	 * which can execute.
2859 	 *
2860 	 * As writes to "cpus" or "mems" may restore @cs's execution
2861 	 * resources, wait for the previously scheduled operations before
2862 	 * proceeding, so that we don't end up keep removing tasks added
2863 	 * proceeding, so that we don't keep removing tasks that were added
2864 	 *
2865 	 * cpuset_hotplug_work calls back into cgroup core via
2866 	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2867 	 * operation like this one can lead to a deadlock through kernfs
2868 	 * active_ref protection.  Let's break the protection.  Losing the
2869 	 * protection is okay as we check whether @cs is online after
2870 	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
2871 	 * hierarchies.
2872 	 */
2873 	css_get(&cs->css);
2874 	kernfs_break_active_protection(of->kn);
2875 	flush_work(&cpuset_hotplug_work);
2876 
2877 	cpus_read_lock();
2878 	mutex_lock(&cpuset_mutex);
2879 	if (!is_cpuset_online(cs))
2880 		goto out_unlock;
2881 
2882 	trialcs = alloc_trial_cpuset(cs);
2883 	if (!trialcs) {
2884 		retval = -ENOMEM;
2885 		goto out_unlock;
2886 	}
2887 
2888 	switch (of_cft(of)->private) {
2889 	case FILE_CPULIST:
2890 		retval = update_cpumask(cs, trialcs, buf);
2891 		break;
2892 	case FILE_MEMLIST:
2893 		retval = update_nodemask(cs, trialcs, buf);
2894 		break;
2895 	default:
2896 		retval = -EINVAL;
2897 		break;
2898 	}
2899 
2900 	free_cpuset(trialcs);
2901 out_unlock:
2902 	mutex_unlock(&cpuset_mutex);
2903 	cpus_read_unlock();
2904 	kernfs_unbreak_active_protection(of->kn);
2905 	css_put(&cs->css);
2906 	flush_workqueue(cpuset_migrate_mm_wq);
2907 	return retval ?: nbytes;
2908 }
2909 
2910 /*
2911  * These ascii lists should be read in a single call, by using a user
2912  * buffer large enough to hold the entire map.  If read in smaller
2913  * chunks, there is no guarantee of atomicity.  Since the display format
2914  * used, a list of ranges of sequential numbers, is variable length,
2915  * and since these maps can change value dynamically, one could read
2916  * gibberish by doing partial reads while a list was changing.
2917  */
2918 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2919 {
2920 	struct cpuset *cs = css_cs(seq_css(sf));
2921 	cpuset_filetype_t type = seq_cft(sf)->private;
2922 	int ret = 0;
2923 
2924 	spin_lock_irq(&callback_lock);
2925 
2926 	switch (type) {
2927 	case FILE_CPULIST:
2928 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2929 		break;
2930 	case FILE_MEMLIST:
2931 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2932 		break;
2933 	case FILE_EFFECTIVE_CPULIST:
2934 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2935 		break;
2936 	case FILE_EFFECTIVE_MEMLIST:
2937 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2938 		break;
2939 	case FILE_SUBPARTS_CPULIST:
2940 		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2941 		break;
2942 	default:
2943 		ret = -EINVAL;
2944 	}
2945 
2946 	spin_unlock_irq(&callback_lock);
2947 	return ret;
2948 }
2949 
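/*
 * Example (sketch): read the whole list in one call with a buffer
 * sized generously for the worst case, e.g.
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * Looping over small chunks instead may interleave with a concurrent
 * update and return the gibberish described above.
 */
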
2950 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2951 {
2952 	struct cpuset *cs = css_cs(css);
2953 	cpuset_filetype_t type = cft->private;
2954 	switch (type) {
2955 	case FILE_CPU_EXCLUSIVE:
2956 		return is_cpu_exclusive(cs);
2957 	case FILE_MEM_EXCLUSIVE:
2958 		return is_mem_exclusive(cs);
2959 	case FILE_MEM_HARDWALL:
2960 		return is_mem_hardwall(cs);
2961 	case FILE_SCHED_LOAD_BALANCE:
2962 		return is_sched_load_balance(cs);
2963 	case FILE_MEMORY_MIGRATE:
2964 		return is_memory_migrate(cs);
2965 	case FILE_MEMORY_PRESSURE_ENABLED:
2966 		return cpuset_memory_pressure_enabled;
2967 	case FILE_MEMORY_PRESSURE:
2968 		return fmeter_getrate(&cs->fmeter);
2969 	case FILE_SPREAD_PAGE:
2970 		return is_spread_page(cs);
2971 	case FILE_SPREAD_SLAB:
2972 		return is_spread_slab(cs);
2973 	default:
2974 		BUG();
2975 	}
2976 
2977 	/* Unreachable but makes gcc happy */
2978 	return 0;
2979 }
2980 
2981 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2982 {
2983 	struct cpuset *cs = css_cs(css);
2984 	cpuset_filetype_t type = cft->private;
2985 	switch (type) {
2986 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2987 		return cs->relax_domain_level;
2988 	default:
2989 		BUG();
2990 	}
2991 
2992 	/* Unreachable but makes gcc happy */
2993 	return 0;
2994 }
2995 
2996 static int sched_partition_show(struct seq_file *seq, void *v)
2997 {
2998 	struct cpuset *cs = css_cs(seq_css(seq));
2999 	const char *err, *type = NULL;
3000 
3001 	switch (cs->partition_root_state) {
3002 	case PRS_ROOT:
3003 		seq_puts(seq, "root\n");
3004 		break;
3005 	case PRS_ISOLATED:
3006 		seq_puts(seq, "isolated\n");
3007 		break;
3008 	case PRS_MEMBER:
3009 		seq_puts(seq, "member\n");
3010 		break;
3011 	case PRS_INVALID_ROOT:
3012 		type = "root";
3013 		fallthrough;
3014 	case PRS_INVALID_ISOLATED:
3015 		if (!type)
3016 			type = "isolated";
3017 		err = perr_strings[READ_ONCE(cs->prs_err)];
3018 		if (err)
3019 			seq_printf(seq, "%s invalid (%s)\n", type, err);
3020 		else
3021 			seq_printf(seq, "%s invalid\n", type);
3022 		break;
3023 	}
3024 	return 0;
3025 }
3026 
3027 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3028 				     size_t nbytes, loff_t off)
3029 {
3030 	struct cpuset *cs = css_cs(of_css(of));
3031 	int val;
3032 	int retval = -ENODEV;
3033 
3034 	buf = strstrip(buf);
3035 
3036 	/*
3037 	 * Convert "root", "isolated" and "member" to their partition root states.
3038 	 */
3039 	if (!strcmp(buf, "root"))
3040 		val = PRS_ROOT;
3041 	else if (!strcmp(buf, "member"))
3042 		val = PRS_MEMBER;
3043 	else if (!strcmp(buf, "isolated"))
3044 		val = PRS_ISOLATED;
3045 	else
3046 		return -EINVAL;
3047 
3048 	css_get(&cs->css);
3049 	cpus_read_lock();
3050 	mutex_lock(&cpuset_mutex);
3051 	if (!is_cpuset_online(cs))
3052 		goto out_unlock;
3053 
3054 	retval = update_prstate(cs, val);
3055 out_unlock:
3056 	mutex_unlock(&cpuset_mutex);
3057 	cpus_read_unlock();
3058 	css_put(&cs->css);
3059 	return retval ?: nbytes;
3060 }
3061 
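/*
 * Usage sketch (cgroup v2): a cgroup is turned into a partition root
 * from userspace with, e.g.
 *
 *	echo root > /sys/fs/cgroup/A/cpuset.cpus.partition
 *
 * "member" switches it back and "isolated" creates a partition whose
 * CPUs are not load balanced - exactly the strings parsed above.
 */
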
3062 /*
3063  * for the common functions, 'private' gives the type of file
3064  */
3065 
3066 static struct cftype legacy_files[] = {
3067 	{
3068 		.name = "cpus",
3069 		.seq_show = cpuset_common_seq_show,
3070 		.write = cpuset_write_resmask,
3071 		.max_write_len = (100U + 6 * NR_CPUS),
3072 		.private = FILE_CPULIST,
3073 	},
3074 
3075 	{
3076 		.name = "mems",
3077 		.seq_show = cpuset_common_seq_show,
3078 		.write = cpuset_write_resmask,
3079 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3080 		.private = FILE_MEMLIST,
3081 	},
3082 
3083 	{
3084 		.name = "effective_cpus",
3085 		.seq_show = cpuset_common_seq_show,
3086 		.private = FILE_EFFECTIVE_CPULIST,
3087 	},
3088 
3089 	{
3090 		.name = "effective_mems",
3091 		.seq_show = cpuset_common_seq_show,
3092 		.private = FILE_EFFECTIVE_MEMLIST,
3093 	},
3094 
3095 	{
3096 		.name = "cpu_exclusive",
3097 		.read_u64 = cpuset_read_u64,
3098 		.write_u64 = cpuset_write_u64,
3099 		.private = FILE_CPU_EXCLUSIVE,
3100 	},
3101 
3102 	{
3103 		.name = "mem_exclusive",
3104 		.read_u64 = cpuset_read_u64,
3105 		.write_u64 = cpuset_write_u64,
3106 		.private = FILE_MEM_EXCLUSIVE,
3107 	},
3108 
3109 	{
3110 		.name = "mem_hardwall",
3111 		.read_u64 = cpuset_read_u64,
3112 		.write_u64 = cpuset_write_u64,
3113 		.private = FILE_MEM_HARDWALL,
3114 	},
3115 
3116 	{
3117 		.name = "sched_load_balance",
3118 		.read_u64 = cpuset_read_u64,
3119 		.write_u64 = cpuset_write_u64,
3120 		.private = FILE_SCHED_LOAD_BALANCE,
3121 	},
3122 
3123 	{
3124 		.name = "sched_relax_domain_level",
3125 		.read_s64 = cpuset_read_s64,
3126 		.write_s64 = cpuset_write_s64,
3127 		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3128 	},
3129 
3130 	{
3131 		.name = "memory_migrate",
3132 		.read_u64 = cpuset_read_u64,
3133 		.write_u64 = cpuset_write_u64,
3134 		.private = FILE_MEMORY_MIGRATE,
3135 	},
3136 
3137 	{
3138 		.name = "memory_pressure",
3139 		.read_u64 = cpuset_read_u64,
3140 		.private = FILE_MEMORY_PRESSURE,
3141 	},
3142 
3143 	{
3144 		.name = "memory_spread_page",
3145 		.read_u64 = cpuset_read_u64,
3146 		.write_u64 = cpuset_write_u64,
3147 		.private = FILE_SPREAD_PAGE,
3148 	},
3149 
3150 	{
3151 		.name = "memory_spread_slab",
3152 		.read_u64 = cpuset_read_u64,
3153 		.write_u64 = cpuset_write_u64,
3154 		.private = FILE_SPREAD_SLAB,
3155 	},
3156 
3157 	{
3158 		.name = "memory_pressure_enabled",
3159 		.flags = CFTYPE_ONLY_ON_ROOT,
3160 		.read_u64 = cpuset_read_u64,
3161 		.write_u64 = cpuset_write_u64,
3162 		.private = FILE_MEMORY_PRESSURE_ENABLED,
3163 	},
3164 
3165 	{ }	/* terminate */
3166 };
3167 
3168 /*
3169  * This is currently a minimal set for the default hierarchy. It can be
3170  * expanded later on by migrating more features and control files from v1.
3171  */
3172 static struct cftype dfl_files[] = {
3173 	{
3174 		.name = "cpus",
3175 		.seq_show = cpuset_common_seq_show,
3176 		.write = cpuset_write_resmask,
3177 		.max_write_len = (100U + 6 * NR_CPUS),
3178 		.private = FILE_CPULIST,
3179 		.flags = CFTYPE_NOT_ON_ROOT,
3180 	},
3181 
3182 	{
3183 		.name = "mems",
3184 		.seq_show = cpuset_common_seq_show,
3185 		.write = cpuset_write_resmask,
3186 		.max_write_len = (100U + 6 * MAX_NUMNODES),
3187 		.private = FILE_MEMLIST,
3188 		.flags = CFTYPE_NOT_ON_ROOT,
3189 	},
3190 
3191 	{
3192 		.name = "cpus.effective",
3193 		.seq_show = cpuset_common_seq_show,
3194 		.private = FILE_EFFECTIVE_CPULIST,
3195 	},
3196 
3197 	{
3198 		.name = "mems.effective",
3199 		.seq_show = cpuset_common_seq_show,
3200 		.private = FILE_EFFECTIVE_MEMLIST,
3201 	},
3202 
3203 	{
3204 		.name = "cpus.partition",
3205 		.seq_show = sched_partition_show,
3206 		.write = sched_partition_write,
3207 		.private = FILE_PARTITION_ROOT,
3208 		.flags = CFTYPE_NOT_ON_ROOT,
3209 		.file_offset = offsetof(struct cpuset, partition_file),
3210 	},
3211 
3212 	{
3213 		.name = "cpus.subpartitions",
3214 		.seq_show = cpuset_common_seq_show,
3215 		.private = FILE_SUBPARTS_CPULIST,
3216 		.flags = CFTYPE_DEBUG,
3217 	},
3218 
3219 	{ }	/* terminate */
3220 };
3221 
3222 
3223 /**
3224  * cpuset_css_alloc - Allocate a cpuset css
3225  * @parent_css: Parent css of the control group that the new cpuset will be
3226  *              part of
3227  * Return: cpuset css on success, -ENOMEM on failure.
3228  *
3229  * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
3230  * return the top cpuset css otherwise.
3231  */
3232 static struct cgroup_subsys_state *
3233 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3234 {
3235 	struct cpuset *cs;
3236 
3237 	if (!parent_css)
3238 		return &top_cpuset.css;
3239 
3240 	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3241 	if (!cs)
3242 		return ERR_PTR(-ENOMEM);
3243 
3244 	if (alloc_cpumasks(cs, NULL)) {
3245 		kfree(cs);
3246 		return ERR_PTR(-ENOMEM);
3247 	}
3248 
3249 	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3250 	nodes_clear(cs->mems_allowed);
3251 	nodes_clear(cs->effective_mems);
3252 	fmeter_init(&cs->fmeter);
3253 	cs->relax_domain_level = -1;
3254 
3255 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3256 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3257 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3258 
3259 	return &cs->css;
3260 }
3261 
3262 static int cpuset_css_online(struct cgroup_subsys_state *css)
3263 {
3264 	struct cpuset *cs = css_cs(css);
3265 	struct cpuset *parent = parent_cs(cs);
3266 	struct cpuset *tmp_cs;
3267 	struct cgroup_subsys_state *pos_css;
3268 
3269 	if (!parent)
3270 		return 0;
3271 
3272 	cpus_read_lock();
3273 	mutex_lock(&cpuset_mutex);
3274 
3275 	set_bit(CS_ONLINE, &cs->flags);
3276 	if (is_spread_page(parent))
3277 		set_bit(CS_SPREAD_PAGE, &cs->flags);
3278 	if (is_spread_slab(parent))
3279 		set_bit(CS_SPREAD_SLAB, &cs->flags);
3280 
3281 	cpuset_inc();
3282 
3283 	spin_lock_irq(&callback_lock);
3284 	if (is_in_v2_mode()) {
3285 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3286 		cs->effective_mems = parent->effective_mems;
3287 		cs->use_parent_ecpus = true;
3288 		parent->child_ecpus_count++;
3289 	}
3290 
3291 	/*
3292 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3293 	 */
3294 	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3295 	    !is_sched_load_balance(parent))
3296 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3297 
3298 	spin_unlock_irq(&callback_lock);
3299 
3300 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3301 		goto out_unlock;
3302 
3303 	/*
3304 	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3305 	 * set.  This flag handling is implemented in cgroup core for
3306 	 * historical reasons - the flag may be specified during mount.
3307 	 *
3308 	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3309 	 * refuse to clone the configuration - thereby refusing to let the
3310 	 * task enter, and as a result failing the sys_unshare() or
3311 	 * clone() which initiated it.  If this becomes a problem for some
3312 	 * users who wish to allow that scenario, then this could be
3313 	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3314 	 * (and likewise for mems) to the new cgroup.
3315 	 */
3316 	rcu_read_lock();
3317 	cpuset_for_each_child(tmp_cs, pos_css, parent) {
3318 		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3319 			rcu_read_unlock();
3320 			goto out_unlock;
3321 		}
3322 	}
3323 	rcu_read_unlock();
3324 
3325 	spin_lock_irq(&callback_lock);
3326 	cs->mems_allowed = parent->mems_allowed;
3327 	cs->effective_mems = parent->mems_allowed;
3328 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3329 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3330 	spin_unlock_irq(&callback_lock);
3331 out_unlock:
3332 	mutex_unlock(&cpuset_mutex);
3333 	cpus_read_unlock();
3334 	return 0;
3335 }
3336 
3337 /*
3338  * If the cpuset being removed has its flag 'sched_load_balance'
3339  * enabled, then simulate turning sched_load_balance off, which
3340  * will call rebuild_sched_domains_locked(). That is not needed
3341  * in the default hierarchy where only changes in partition
3342  * will cause repartitioning.
3343  *
3344  * If the cpuset has the 'sched.partition' flag enabled, simulate
3345  * turning 'sched.partition' off.
3346  */
3347 
3348 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3349 {
3350 	struct cpuset *cs = css_cs(css);
3351 
3352 	cpus_read_lock();
3353 	mutex_lock(&cpuset_mutex);
3354 
3355 	if (is_partition_valid(cs))
3356 		update_prstate(cs, 0);
3357 
3358 	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3359 	    is_sched_load_balance(cs))
3360 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3361 
3362 	if (cs->use_parent_ecpus) {
3363 		struct cpuset *parent = parent_cs(cs);
3364 
3365 		cs->use_parent_ecpus = false;
3366 		parent->child_ecpus_count--;
3367 	}
3368 
3369 	cpuset_dec();
3370 	clear_bit(CS_ONLINE, &cs->flags);
3371 
3372 	mutex_unlock(&cpuset_mutex);
3373 	cpus_read_unlock();
3374 }
3375 
3376 static void cpuset_css_free(struct cgroup_subsys_state *css)
3377 {
3378 	struct cpuset *cs = css_cs(css);
3379 
3380 	free_cpuset(cs);
3381 }
3382 
3383 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3384 {
3385 	mutex_lock(&cpuset_mutex);
3386 	spin_lock_irq(&callback_lock);
3387 
3388 	if (is_in_v2_mode()) {
3389 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3390 		top_cpuset.mems_allowed = node_possible_map;
3391 	} else {
3392 		cpumask_copy(top_cpuset.cpus_allowed,
3393 			     top_cpuset.effective_cpus);
3394 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3395 	}
3396 
3397 	spin_unlock_irq(&callback_lock);
3398 	mutex_unlock(&cpuset_mutex);
3399 }
3400 
3401 /*
3402  * In case the child is cloned into a cpuset different from its parent,
3403  * additional checks are done to see if the move is allowed.
3404  */
3405 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3406 {
3407 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3408 	bool same_cs;
3409 	int ret;
3410 
3411 	rcu_read_lock();
3412 	same_cs = (cs == task_cs(current));
3413 	rcu_read_unlock();
3414 
3415 	if (same_cs)
3416 		return 0;
3417 
3418 	lockdep_assert_held(&cgroup_mutex);
3419 	mutex_lock(&cpuset_mutex);
3420 
3421 	/* Check to see if task is allowed in the cpuset */
3422 	ret = cpuset_can_attach_check(cs);
3423 	if (ret)
3424 		goto out_unlock;
3425 
3426 	ret = task_can_attach(task);
3427 	if (ret)
3428 		goto out_unlock;
3429 
3430 	ret = security_task_setscheduler(task);
3431 	if (ret)
3432 		goto out_unlock;
3433 
3434 	/*
3435 	 * Mark that an attach is in progress.  This makes validate_change() fail
3436 	 * changes which zero cpus/mems_allowed.
3437 	 */
3438 	cs->attach_in_progress++;
3439 out_unlock:
3440 	mutex_unlock(&cpuset_mutex);
3441 	return ret;
3442 }
3443 
3444 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3445 {
3446 	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3447 	bool same_cs;
3448 
3449 	rcu_read_lock();
3450 	same_cs = (cs == task_cs(current));
3451 	rcu_read_unlock();
3452 
3453 	if (same_cs)
3454 		return;
3455 
3456 	mutex_lock(&cpuset_mutex);
3457 	cs->attach_in_progress--;
3458 	if (!cs->attach_in_progress)
3459 		wake_up(&cpuset_attach_wq);
3460 	mutex_unlock(&cpuset_mutex);
3461 }
3462 
3463 /*
3464  * Make sure the new task conforms to the current state of its parent,
3465  * which could have been changed by cpuset just after it inherits the
3466  * state from the parent and before it sits on the cgroup's task list.
3467  */
3468 static void cpuset_fork(struct task_struct *task)
3469 {
3470 	struct cpuset *cs;
3471 	bool same_cs;
3472 
3473 	rcu_read_lock();
3474 	cs = task_cs(task);
3475 	same_cs = (cs == task_cs(current));
3476 	rcu_read_unlock();
3477 
3478 	if (same_cs) {
3479 		if (cs == &top_cpuset)
3480 			return;
3481 
3482 		set_cpus_allowed_ptr(task, current->cpus_ptr);
3483 		task->mems_allowed = current->mems_allowed;
3484 		return;
3485 	}
3486 
3487 	/* CLONE_INTO_CGROUP */
3488 	mutex_lock(&cpuset_mutex);
3489 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3490 	cpuset_attach_task(cs, task);
3491 
3492 	cs->attach_in_progress--;
3493 	if (!cs->attach_in_progress)
3494 		wake_up(&cpuset_attach_wq);
3495 
3496 	mutex_unlock(&cpuset_mutex);
3497 }
3498 
3499 struct cgroup_subsys cpuset_cgrp_subsys = {
3500 	.css_alloc	= cpuset_css_alloc,
3501 	.css_online	= cpuset_css_online,
3502 	.css_offline	= cpuset_css_offline,
3503 	.css_free	= cpuset_css_free,
3504 	.can_attach	= cpuset_can_attach,
3505 	.cancel_attach	= cpuset_cancel_attach,
3506 	.attach		= cpuset_attach,
3507 	.post_attach	= cpuset_post_attach,
3508 	.bind		= cpuset_bind,
3509 	.can_fork	= cpuset_can_fork,
3510 	.cancel_fork	= cpuset_cancel_fork,
3511 	.fork		= cpuset_fork,
3512 	.legacy_cftypes	= legacy_files,
3513 	.dfl_cftypes	= dfl_files,
3514 	.early_init	= true,
3515 	.threaded	= true,
3516 };
3517 
3518 /**
3519  * cpuset_init - initialize cpusets at system boot
3520  *
3521  * Description: Initialize top_cpuset
3522  **/
3523 
3524 int __init cpuset_init(void)
3525 {
3526 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3527 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3528 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3529 
3530 	cpumask_setall(top_cpuset.cpus_allowed);
3531 	nodes_setall(top_cpuset.mems_allowed);
3532 	cpumask_setall(top_cpuset.effective_cpus);
3533 	nodes_setall(top_cpuset.effective_mems);
3534 
3535 	fmeter_init(&top_cpuset.fmeter);
3536 	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3537 	top_cpuset.relax_domain_level = -1;
3538 
3539 	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3540 
3541 	return 0;
3542 }
3543 
3544 /*
3545  * If the CPU and/or memory hotplug handlers below unplug any CPUs
3546  * or memory nodes, we need to walk over the cpuset hierarchy,
3547  * removing that CPU or node from all cpusets.  If this removes the
3548  * last CPU or node from a cpuset, then move the tasks in the empty
3549  * cpuset to its next-highest non-empty parent.
3550  */
3551 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3552 {
3553 	struct cpuset *parent;
3554 
3555 	/*
3556 	 * Find its next-highest non-empty parent (the top cpuset
3557 	 * has online cpus, so it can't be empty).
3558 	 */
3559 	parent = parent_cs(cs);
3560 	while (cpumask_empty(parent->cpus_allowed) ||
3561 			nodes_empty(parent->mems_allowed))
3562 		parent = parent_cs(parent);
3563 
3564 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3565 		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3566 		pr_cont_cgroup_name(cs->css.cgroup);
3567 		pr_cont("\n");
3568 	}
3569 }
3570 
3571 static void
3572 hotplug_update_tasks_legacy(struct cpuset *cs,
3573 			    struct cpumask *new_cpus, nodemask_t *new_mems,
3574 			    bool cpus_updated, bool mems_updated)
3575 {
3576 	bool is_empty;
3577 
3578 	spin_lock_irq(&callback_lock);
3579 	cpumask_copy(cs->cpus_allowed, new_cpus);
3580 	cpumask_copy(cs->effective_cpus, new_cpus);
3581 	cs->mems_allowed = *new_mems;
3582 	cs->effective_mems = *new_mems;
3583 	spin_unlock_irq(&callback_lock);
3584 
3585 	/*
3586 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3587 	 * as the tasks will be migrated to an ancestor.
3588 	 */
3589 	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3590 		update_tasks_cpumask(cs, new_cpus);
3591 	if (mems_updated && !nodes_empty(cs->mems_allowed))
3592 		update_tasks_nodemask(cs);
3593 
3594 	is_empty = cpumask_empty(cs->cpus_allowed) ||
3595 		   nodes_empty(cs->mems_allowed);
3596 
3597 	/*
3598 	 * Move tasks to the nearest ancestor with execution resources.
3599 	 * This is a full cgroup operation which will also call back into
3600 	 * cpuset. It should be done outside any lock.
3601 	 */
3602 	if (is_empty) {
3603 		mutex_unlock(&cpuset_mutex);
3604 		remove_tasks_in_empty_cpuset(cs);
3605 		mutex_lock(&cpuset_mutex);
3606 	}
3607 }
3608 
3609 static void
3610 hotplug_update_tasks(struct cpuset *cs,
3611 		     struct cpumask *new_cpus, nodemask_t *new_mems,
3612 		     bool cpus_updated, bool mems_updated)
3613 {
3614 	/* A partition root is allowed to have empty effective cpus */
3615 	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3616 		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3617 	if (nodes_empty(*new_mems))
3618 		*new_mems = parent_cs(cs)->effective_mems;
3619 
3620 	spin_lock_irq(&callback_lock);
3621 	cpumask_copy(cs->effective_cpus, new_cpus);
3622 	cs->effective_mems = *new_mems;
3623 	spin_unlock_irq(&callback_lock);
3624 
3625 	if (cpus_updated)
3626 		update_tasks_cpumask(cs, new_cpus);
3627 	if (mems_updated)
3628 		update_tasks_nodemask(cs);
3629 }
3630 
3631 static bool force_rebuild;
3632 
3633 void cpuset_force_rebuild(void)
3634 {
3635 	force_rebuild = true;
3636 }
3637 
3638 /**
3639  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3640  * @cs: cpuset in interest
3641  * @tmp: the tmpmasks structure pointer
3642  *
3643  * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3644  * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3645  * all its tasks are moved to the nearest ancestor with both resources.
3646  */
3647 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3648 {
3649 	static cpumask_t new_cpus;
3650 	static nodemask_t new_mems;
3651 	bool cpus_updated;
3652 	bool mems_updated;
3653 	struct cpuset *parent;
3654 retry:
3655 	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3656 
3657 	mutex_lock(&cpuset_mutex);
3658 
3659 	/*
3660 	 * We have raced with a task attach. Wait until the attach
3661 	 * is finished, so we won't attach a task to an empty cpuset.
3662 	 */
3663 	if (cs->attach_in_progress) {
3664 		mutex_unlock(&cpuset_mutex);
3665 		goto retry;
3666 	}
3667 
3668 	parent = parent_cs(cs);
3669 	compute_effective_cpumask(&new_cpus, cs, parent);
3670 	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3671 
3672 	if (cs->nr_subparts_cpus)
3673 		/*
3674 		 * Make sure that CPUs allocated to child partitions
3675 		 * do not show up in effective_cpus.
3676 		 */
3677 		cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3678 
3679 	if (!tmp || !cs->partition_root_state)
3680 		goto update_tasks;
3681 
3682 	/*
3683 	 * In the unlikely event that a partition root has empty
3684 	 * effective_cpus with tasks, we will have to invalidate child
3685 	 * partitions, if present, by setting nr_subparts_cpus to 0 to
3686 	 * reclaim their cpus.
3687 	 */
3688 	if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3689 	    cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3690 		spin_lock_irq(&callback_lock);
3691 		cs->nr_subparts_cpus = 0;
3692 		cpumask_clear(cs->subparts_cpus);
3693 		spin_unlock_irq(&callback_lock);
3694 		compute_effective_cpumask(&new_cpus, cs, parent);
3695 	}
3696 
3697 	/*
3698 	 * Force the partition to become invalid if either of
3699 	 * the following conditions holds:
3700 	 * 1) empty effective cpus but not valid empty partition.
3701 	 * 2) parent is invalid or doesn't grant any cpus to child
3702 	 *    partitions.
3703 	 */
3704 	if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3705 	   (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3706 		int old_prs, parent_prs;
3707 
3708 		update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3709 		if (cs->nr_subparts_cpus) {
3710 			spin_lock_irq(&callback_lock);
3711 			cs->nr_subparts_cpus = 0;
3712 			cpumask_clear(cs->subparts_cpus);
3713 			spin_unlock_irq(&callback_lock);
3714 			compute_effective_cpumask(&new_cpus, cs, parent);
3715 		}
3716 
3717 		old_prs = cs->partition_root_state;
3718 		parent_prs = parent->partition_root_state;
3719 		if (is_partition_valid(cs)) {
3720 			spin_lock_irq(&callback_lock);
3721 			make_partition_invalid(cs);
3722 			spin_unlock_irq(&callback_lock);
3723 			if (is_prs_invalid(parent_prs))
3724 				WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3725 			else if (!parent_prs)
3726 				WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3727 			else
3728 				WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3729 			notify_partition_change(cs, old_prs);
3730 		}
3731 		cpuset_force_rebuild();
3732 	}
3733 
3734 	/*
3735 	 * On the other hand, an invalid partition root may be transitioned
3736 	 * back to a regular one.
3737 	 */
3738 	else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3739 		update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3740 		if (is_partition_valid(cs))
3741 			cpuset_force_rebuild();
3742 	}
3743 
3744 update_tasks:
3745 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3746 	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3747 	if (!cpus_updated && !mems_updated)
3748 		goto unlock;	/* Hotplug doesn't affect this cpuset */
3749 
3750 	if (mems_updated)
3751 		check_insane_mems_config(&new_mems);
3752 
3753 	if (is_in_v2_mode())
3754 		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3755 				     cpus_updated, mems_updated);
3756 	else
3757 		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3758 					    cpus_updated, mems_updated);
3759 
3760 unlock:
3761 	mutex_unlock(&cpuset_mutex);
3762 }
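
/*
 * The retry loop at the top of cpuset_hotplug_update_tasks() is an
 * instance of a generic pattern: sleep on a waitqueue without the
 * mutex held, then recheck the condition under the mutex and loop if
 * it no longer holds.  A distilled sketch (names hypothetical):
 *
 *	retry:
 *		wait_event(wq, obj->in_progress == 0);
 *		mutex_lock(&lock);
 *		if (obj->in_progress) {		// raced with a new starter
 *			mutex_unlock(&lock);
 *			goto retry;
 *		}
 *		// ... obj is now stable under the mutex ...
 *		mutex_unlock(&lock);
 */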
3763 
3764 /**
3765  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3766  * @work: unused
3767  *
3768  * This function is called after either CPU or memory configuration has
3769  * changed and updates cpuset accordingly.  The top_cpuset is always
3770  * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3771  * order to make cpusets transparent (of no affect) on systems that are
3772  * actively using CPU hotplug but making no active use of cpusets.
3773  *
3774  * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3775  * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3776  * all descendants.
3777  *
3778  * Note that CPU offlining during suspend is ignored.  We don't modify
3779  * cpusets across suspend/resume cycles at all.
3780  */
3781 static void cpuset_hotplug_workfn(struct work_struct *work)
3782 {
3783 	static cpumask_t new_cpus;
3784 	static nodemask_t new_mems;
3785 	bool cpus_updated, mems_updated;
3786 	bool on_dfl = is_in_v2_mode();
3787 	struct tmpmasks tmp, *ptmp = NULL;
3788 
3789 	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3790 		ptmp = &tmp;
3791 
3792 	mutex_lock(&cpuset_mutex);
3793 
3794 	/* fetch the available cpus/mems and find out which changed how */
3795 	cpumask_copy(&new_cpus, cpu_active_mask);
3796 	new_mems = node_states[N_MEMORY];
3797 
3798 	/*
3799 	 * If subparts_cpus is populated, it is likely that the check below
3800 	 * will produce a false positive on cpus_updated when the cpu list
3801 	 * hasn't changed. It is extra work, but it is better to be safe.
3802 	 */
3803 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3804 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3805 
3806 	/*
3807 	 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3808 	 * we assume that the cpus are updated.
3809 	 */
3810 	if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3811 		cpus_updated = true;
3812 
3813 	/* synchronize cpus_allowed to cpu_active_mask */
3814 	if (cpus_updated) {
3815 		spin_lock_irq(&callback_lock);
3816 		if (!on_dfl)
3817 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3818 		/*
3819 		 * Make sure that CPUs allocated to child partitions
3820 		 * do not show up in effective_cpus. If no CPU is left,
3821 		 * we clear the subparts_cpus & let the child partitions
3822 		 * fight for the CPUs again.
3823 		 */
3824 		if (top_cpuset.nr_subparts_cpus) {
3825 			if (cpumask_subset(&new_cpus,
3826 					   top_cpuset.subparts_cpus)) {
3827 				top_cpuset.nr_subparts_cpus = 0;
3828 				cpumask_clear(top_cpuset.subparts_cpus);
3829 			} else {
3830 				cpumask_andnot(&new_cpus, &new_cpus,
3831 					       top_cpuset.subparts_cpus);
3832 			}
3833 		}
3834 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3835 		spin_unlock_irq(&callback_lock);
3836 		/* we don't mess with cpumasks of tasks in top_cpuset */
3837 	}
3838 
3839 	/* synchronize mems_allowed to N_MEMORY */
3840 	if (mems_updated) {
3841 		spin_lock_irq(&callback_lock);
3842 		if (!on_dfl)
3843 			top_cpuset.mems_allowed = new_mems;
3844 		top_cpuset.effective_mems = new_mems;
3845 		spin_unlock_irq(&callback_lock);
3846 		update_tasks_nodemask(&top_cpuset);
3847 	}
3848 
3849 	mutex_unlock(&cpuset_mutex);
3850 
3851 	/* if cpus or mems changed, we need to propagate to descendants */
3852 	if (cpus_updated || mems_updated) {
3853 		struct cpuset *cs;
3854 		struct cgroup_subsys_state *pos_css;
3855 
3856 		rcu_read_lock();
3857 		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3858 			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3859 				continue;
3860 			rcu_read_unlock();
3861 
3862 			cpuset_hotplug_update_tasks(cs, ptmp);
3863 
3864 			rcu_read_lock();
3865 			css_put(&cs->css);
3866 		}
3867 		rcu_read_unlock();
3868 	}
3869 
3870 	/* rebuild sched domains if cpus_allowed has changed */
3871 	if (cpus_updated || force_rebuild) {
3872 		force_rebuild = false;
3873 		rebuild_sched_domains();
3874 	}
3875 
3876 	free_cpumasks(NULL, ptmp);
3877 }
3878 
3879 void cpuset_update_active_cpus(void)
3880 {
3881 	/*
3882 	 * We're inside the cpu hotplug critical region, which usually nests
3883 	 * inside cgroup synchronization.  Bounce actual hotplug processing
3884 	 * to a work item to avoid reverse locking order.
3885 	 */
3886 	schedule_work(&cpuset_hotplug_work);
3887 }
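
/*
 * A distilled sketch of the bounce-to-workqueue pattern used above
 * (names hypothetical): the hotplug callback, which runs with hotplug
 * locks held, only schedules; the work item then takes the subsystem
 * mutex in the normal order:
 *
 *	static void my_hotplug_workfn(struct work_struct *work)
 *	{
 *		mutex_lock(&my_mutex);	// safe: no hotplug lock held here
 *		// ... recompute state, update tasks ...
 *		mutex_unlock(&my_mutex);
 *	}
 *	static DECLARE_WORK(my_hotplug_work, my_hotplug_workfn);
 *
 *	void my_update_active_cpus(void)
 *	{
 *		schedule_work(&my_hotplug_work);
 *	}
 */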
3888 
3889 void cpuset_wait_for_hotplug(void)
3890 {
3891 	flush_work(&cpuset_hotplug_work);
3892 }
3893 
3894 /*
3895  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3896  * Call this routine anytime after node_states[N_MEMORY] changes.
3897  * See cpuset_update_active_cpus() for CPU hotplug handling.
3898  */
3899 static int cpuset_track_online_nodes(struct notifier_block *self,
3900 				unsigned long action, void *arg)
3901 {
3902 	schedule_work(&cpuset_hotplug_work);
3903 	return NOTIFY_OK;
3904 }
3905 
3906 /**
3907  * cpuset_init_smp - initialize cpus_allowed
3908  *
3909  * Description: Finish top cpuset after cpu, node maps are initialized
3910  */
3911 void __init cpuset_init_smp(void)
3912 {
3913 	/*
3914 	 * cpus_allowed/mems_allowed set to v2 values in the initial
3915 	 * cpuset_bind() call will be reset to v1 values in another
3916 	 * cpuset_bind() call when v1 cpuset is mounted.
3917 	 */
3918 	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3919 
3920 	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3921 	top_cpuset.effective_mems = node_states[N_MEMORY];
3922 
3923 	hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3924 
3925 	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3926 	BUG_ON(!cpuset_migrate_mm_wq);
3927 }
3928 
3929 /**
3930  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3931  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3932  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3933  *
3934  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3935  * attached to the specified @tsk.  Guaranteed to return some non-empty
3936  * subset of cpu_online_mask, even if this means going outside the
3937  * task's cpuset, except when the task is in the top cpuset.
3938  **/
3939 
3940 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3941 {
3942 	unsigned long flags;
3943 	struct cpuset *cs;
3944 
3945 	spin_lock_irqsave(&callback_lock, flags);
3946 	rcu_read_lock();
3947 
3948 	cs = task_cs(tsk);
3949 	if (cs != &top_cpuset)
3950 		guarantee_online_cpus(tsk, pmask);
3951 	/*
3952 	 * Tasks in the top cpuset won't get updates to their cpumasks
3953 	 * when a hotplug online/offline event happens. So we include all
3954 	 * offline cpus in the allowed cpu list.
3955 	 */
3956 	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3957 		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3958 
3959 		/*
3960 		 * We first exclude cpus allocated to partitions. If there is no
3961 		 * allowable online cpu left, we fall back to all possible cpus.
3962 		 */
3963 		cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
3964 		if (!cpumask_intersects(pmask, cpu_online_mask))
3965 			cpumask_copy(pmask, possible_mask);
3966 	}
3967 
3968 	rcu_read_unlock();
3969 	spin_unlock_irqrestore(&callback_lock, flags);
3970 }
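
/*
 * A hedged usage sketch for cpuset_cpus_allowed(); the caller and its
 * variables are hypothetical:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpuset_cpus_allowed(tsk, mask);		// never empty on return
 *	pr_info("task may run on %*pbl\n", cpumask_pr_args(mask));
 *	free_cpumask_var(mask);
 */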
3971 
3972 /**
3973  * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3974  * @tsk: pointer to task_struct with which the scheduler is struggling
3975  *
3976  * Description: In the case that the scheduler cannot find an allowed cpu in
3977  * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3978  * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3979  * which will not contain a sane cpumask during cases such as cpu hotplugging.
3980  * This is the absolute last resort for the scheduler and it is only used if
3981  * _every_ other avenue has been traveled.
3982  *
3983  * Returns true if the affinity of @tsk was changed, false otherwise.
3984  **/
3985 
3986 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3987 {
3988 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3989 	const struct cpumask *cs_mask;
3990 	bool changed = false;
3991 
3992 	rcu_read_lock();
3993 	cs_mask = task_cs(tsk)->cpus_allowed;
3994 	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
3995 		do_set_cpus_allowed(tsk, cs_mask);
3996 		changed = true;
3997 	}
3998 	rcu_read_unlock();
3999 
4000 	/*
4001 	 * We own tsk->cpus_allowed, nobody can change it under us.
4002 	 *
4003 	 * But we used cs && cs->cpus_allowed lockless and thus can
4004 	 * race with cgroup_attach_task() or update_cpumask() and get
4005 	 * the wrong tsk->cpus_allowed. However, both cases imply the
4006 	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4007 	 * which takes task_rq_lock().
4008 	 *
4009 	 * If we are called after it dropped the lock we must see all
4010 	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4011 	 * set any mask even if it is not right from the task_cs() pov;
4012 	 * the pending set_cpus_allowed_ptr() will fix things.
4013 	 *
4014 	 * select_fallback_rq() will fix things up and set cpu_possible_mask
4015 	 * if required.
4016 	 */
4017 	return changed;
4018 }
4019 
4020 void __init cpuset_init_current_mems_allowed(void)
4021 {
4022 	nodes_setall(current->mems_allowed);
4023 }
4024 
4025 /**
4026  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4027  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4028  *
4029  * Description: Returns the nodemask_t mems_allowed of the cpuset
4030  * attached to the specified @tsk.  Guaranteed to return some non-empty
4031  * subset of node_states[N_MEMORY], even if this means going outside the
4032  * task's cpuset.
4033  **/
4034 
4035 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4036 {
4037 	nodemask_t mask;
4038 	unsigned long flags;
4039 
4040 	spin_lock_irqsave(&callback_lock, flags);
4041 	rcu_read_lock();
4042 	guarantee_online_mems(task_cs(tsk), &mask);
4043 	rcu_read_unlock();
4044 	spin_unlock_irqrestore(&callback_lock, flags);
4045 
4046 	return mask;
4047 }
4048 
4049 /**
4050  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4051  * @nodemask: the nodemask to be checked
4052  *
4053  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4054  */
4055 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4056 {
4057 	return nodes_intersects(*nodemask, current->mems_allowed);
4058 }
4059 
4060 /*
4061  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4062  * mem_hardwall ancestor to the specified cpuset.  Call holding
4063  * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
4064  * (an unusual configuration), then returns the root cpuset.
4065  */
4066 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4067 {
4068 	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4069 		cs = parent_cs(cs);
4070 	return cs;
4071 }
4072 
4073 /*
4074  * cpuset_node_allowed - Can we allocate on a memory node?
4075  * @node: is this an allowed node?
4076  * @gfp_mask: memory allocation flags
4077  *
4078  * If we're in interrupt, yes, we can always allocate.  If @node is set in
4079  * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
4080  * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4081  * yes.  If current has access to memory reserves as an oom victim, yes.
4082  * Otherwise, no.
4083  *
4084  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4085  * and do not allow allocations outside the current task's cpuset
4086  * unless the task has been OOM killed.
4087  * GFP_KERNEL allocations are not so marked, so can escape to the
4088  * nearest enclosing hardwalled ancestor cpuset.
4089  *
4090  * Scanning up parent cpusets requires callback_lock.  The
4091  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4092  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4093  * current task's mems_allowed came up empty on the first pass over
4094  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
4095  * cpuset are short of memory, might require taking the callback_lock.
4096  *
4097  * The first call here from mm/page_alloc:get_page_from_freelist()
4098  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4099  * so no allocation on a node outside the cpuset is allowed (unless
4100  * in interrupt, of course).
4101  *
4102  * The second pass through get_page_from_freelist() doesn't even call
4103  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
4104  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4105  * in alloc_flags.  That logic and the checks below have the combined
4106  * effect that:
4107  *	in_interrupt - any node ok (current task context irrelevant)
4108  *	GFP_ATOMIC   - any node ok
4109  *	tsk_is_oom_victim   - any node ok
4110  *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
4111  *	GFP_USER     - only nodes in the current task's mems_allowed ok.
4112  */
4113 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4114 {
4115 	struct cpuset *cs;		/* current cpuset ancestors */
4116 	bool allowed;			/* is allocation on @node allowed? */
4117 	unsigned long flags;
4118 
4119 	if (in_interrupt())
4120 		return true;
4121 	if (node_isset(node, current->mems_allowed))
4122 		return true;
4123 	/*
4124 	 * Allow tasks that have access to memory reserves because they have
4125 	 * been OOM killed to get memory anywhere.
4126 	 */
4127 	if (unlikely(tsk_is_oom_victim(current)))
4128 		return true;
4129 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
4130 		return false;
4131 
4132 	if (current->flags & PF_EXITING) /* Let dying task have memory */
4133 		return true;
4134 
4135 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
4136 	spin_lock_irqsave(&callback_lock, flags);
4137 
4138 	rcu_read_lock();
4139 	cs = nearest_hardwall_ancestor(task_cs(current));
4140 	allowed = node_isset(node, cs->mems_allowed);
4141 	rcu_read_unlock();
4142 
4143 	spin_unlock_irqrestore(&callback_lock, flags);
4144 	return allowed;
4145 }
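
/*
 * Illustrative sketch (hypothetical helper): the same node may be
 * allowed for GFP_KERNEL yet denied for GFP_USER, since only the
 * latter carries __GFP_HARDWALL:
 *
 *	static bool example_hardwall_gap(int nid)
 *	{
 *		// may scan up to the nearest hardwalled ancestor
 *		bool k_ok = cpuset_node_allowed(nid, GFP_KERNEL);
 *		// hardwall: checks current->mems_allowed only
 *		bool u_ok = cpuset_node_allowed(nid, GFP_USER);
 *
 *		return k_ok && !u_ok;	// only the hardwall escape allowed it
 *	}
 */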
4146 
4147 /**
4148  * cpuset_spread_node() - On which node to begin search for a page
4149  * @rotor: round robin rotor
4150  *
4151  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4152  * tasks in a cpuset with is_spread_page or is_spread_slab set),
4153  * and if the memory allocation used cpuset_mem_spread_node()
4154  * to determine on which node to start looking, as it will for
4155  * certain page cache or slab cache pages such as those used for
4156  * file system buffers and inode caches, then instead of starting on
4157  * the local node to look for a free page, we spread the starting
4158  * node around the task's mems_allowed nodes.
4159  *
4160  * We don't have to worry about the returned node being offline
4161  * because "it can't happen", and even if it did, it would be ok.
4162  *
4163  * The routines calling guarantee_online_mems() are careful to
4164  * only set nodes in task->mems_allowed that are online.  So it
4165  * should not be possible for the following code to return an
4166  * offline node.  But if it did, that would be ok, as this routine
4167  * is not returning the node where the allocation must be, only
4168  * the node where the search should start.  The zonelist passed to
4169  * __alloc_pages() will include all nodes.  If the slab allocator
4170  * is passed an offline node, it will fall back to the local node.
4171  * See kmem_cache_alloc_node().
4172  */
4173 static int cpuset_spread_node(int *rotor)
4174 {
4175 	return *rotor = next_node_in(*rotor, current->mems_allowed);
4176 }
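
/*
 * Worked example (illustrative values): with current->mems_allowed
 * containing nodes {0, 2}:
 *
 *	int rotor = 0;
 *
 *	cpuset_spread_node(&rotor);	// returns 2, rotor == 2
 *	cpuset_spread_node(&rotor);	// returns 0 (wraps), rotor == 0
 */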
4177 
4178 /**
4179  * cpuset_mem_spread_node() - On which node to begin search for a file page
4180  */
4181 int cpuset_mem_spread_node(void)
4182 {
4183 	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4184 		current->cpuset_mem_spread_rotor =
4185 			node_random(&current->mems_allowed);
4186 
4187 	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4188 }
4189 
4190 /**
4191  * cpuset_slab_spread_node() - On which node to begin search for a slab page
4192  */
4193 int cpuset_slab_spread_node(void)
4194 {
4195 	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
4196 		current->cpuset_slab_spread_rotor =
4197 			node_random(&current->mems_allowed);
4198 
4199 	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
4200 }
4201 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
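
/*
 * A hedged sketch of how an allocation path might use the spread
 * rotor (the page cache does something similar; the helper here is
 * hypothetical):
 *
 *	static struct page *example_spread_alloc(gfp_t gfp)
 *	{
 *		int nid = numa_node_id();	// default: local node
 *
 *		if (cpuset_do_page_mem_spread())
 *			nid = cpuset_mem_spread_node();
 *		return __alloc_pages_node(nid, gfp, 0);
 *	}
 */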
4202 
4203 /**
4204  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4205  * @tsk1: pointer to task_struct of some task.
4206  * @tsk2: pointer to task_struct of some other task.
4207  *
4208  * Description: Return true if @tsk1's mems_allowed intersects the
4209  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
4210  * one of the task's memory usage might impact the memory available
4211  * to the other.
4212  **/
4213 
4214 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4215 				   const struct task_struct *tsk2)
4216 {
4217 	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4218 }
4219 
4220 /**
4221  * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4222  *
4223  * Description: Prints current's name, cpuset name, and cached copy of its
4224  * mems_allowed to the kernel log.
4225  */
4226 void cpuset_print_current_mems_allowed(void)
4227 {
4228 	struct cgroup *cgrp;
4229 
4230 	rcu_read_lock();
4231 
4232 	cgrp = task_cs(current)->css.cgroup;
4233 	pr_cont(",cpuset=");
4234 	pr_cont_cgroup_name(cgrp);
4235 	pr_cont(",mems_allowed=%*pbl",
4236 		nodemask_pr_args(&current->mems_allowed));
4237 
4238 	rcu_read_unlock();
4239 }
4240 
4241 /*
4242  * Collection of memory_pressure is suppressed unless
4243  * this flag is enabled by writing "1" to the special
4244  * cpuset file 'memory_pressure_enabled' in the root cpuset.
4245  */
4246 
4247 int cpuset_memory_pressure_enabled __read_mostly;
4248 
4249 /*
4250  * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4251  *
4252  * Keep a running average of the rate of synchronous (direct)
4253  * page reclaim efforts initiated by tasks in each cpuset.
4254  *
4255  * This represents the rate at which some task in the cpuset
4256  * ran low on memory on all nodes it was allowed to use, and
4257  * had to enter the kernel's page reclaim code in an effort to
4258  * create more free memory by tossing clean pages or swapping
4259  * or writing dirty pages.
4260  *
4261  * Display to user space in the per-cpuset read-only file
4262  * "memory_pressure".  Value displayed is an integer
4263  * representing the recent rate of entry into the synchronous
4264  * (direct) page reclaim by any task attached to the cpuset.
4265  */
4266 
4267 void __cpuset_memory_pressure_bump(void)
4268 {
4269 	rcu_read_lock();
4270 	fmeter_markevent(&task_cs(current)->fmeter);
4271 	rcu_read_unlock();
4272 }
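
/*
 * A hedged userspace sketch for reading the result (legacy hierarchy;
 * the mount point and group are hypothetical, and the root cpuset's
 * memory_pressure_enabled must have been set to 1 first):
 *
 *	#include <stdio.h>
 *
 *	int read_memory_pressure(void)
 *	{
 *		int val = -1;
 *		FILE *f = fopen("/sys/fs/cgroup/cpuset/mygrp/"
 *				"cpuset.memory_pressure", "r");
 *
 *		if (f) {
 *			fscanf(f, "%d", &val);
 *			fclose(f);
 *		}
 *		return val;	// recent direct-reclaim entry rate
 *	}
 */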
4273 
4274 #ifdef CONFIG_PROC_PID_CPUSET
4275 /*
4276  * proc_cpuset_show()
4277  *  - Print task's cpuset path into seq_file.
4278  *  - Used for /proc/<pid>/cpuset.
4279  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4280  *    doesn't really matter if tsk->cpuset changes after we read it,
4281  *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
4282  *    anyway.
4283  */
4284 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4285 		     struct pid *pid, struct task_struct *tsk)
4286 {
4287 	char *buf;
4288 	struct cgroup_subsys_state *css;
4289 	int retval;
4290 
4291 	retval = -ENOMEM;
4292 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
4293 	if (!buf)
4294 		goto out;
4295 
4296 	css = task_get_css(tsk, cpuset_cgrp_id);
4297 	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
4298 				current->nsproxy->cgroup_ns);
4299 	css_put(css);
4300 	if (retval >= PATH_MAX)
4301 		retval = -ENAMETOOLONG;
4302 	if (retval < 0)
4303 		goto out_free;
4304 	seq_puts(m, buf);
4305 	seq_putc(m, '\n');
4306 	retval = 0;
4307 out_free:
4308 	kfree(buf);
4309 out:
4310 	return retval;
4311 }
4312 #endif /* CONFIG_PROC_PID_CPUSET */
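
/*
 * A hedged userspace sketch for consuming /proc/<pid>/cpuset (error
 * handling abbreviated):
 *
 *	#include <stdio.h>
 *
 *	int print_cpuset_path(int pid)
 *	{
 *		char path[64], line[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/%d/cpuset", pid);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fgets(line, sizeof(line), f))
 *			printf("cpuset: %s", line);	// e.g. "/mygrp\n"
 *		fclose(f);
 *		return 0;
 *	}
 */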
4313 
4314 /* Display task mems_allowed in /proc/<pid>/status file. */
4315 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4316 {
4317 	seq_printf(m, "Mems_allowed:\t%*pb\n",
4318 		   nodemask_pr_args(&task->mems_allowed));
4319 	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4320 		   nodemask_pr_args(&task->mems_allowed));
4321 }
4322